Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-26 09:32:01 +00:00)

Commit 204f2e1381: Merge remote-tracking branch 'upstream/master' into HEAD
@@ -40,8 +40,6 @@ Every month we get together with the community (users, contributors, customers,

Keep an eye out for upcoming meetups and events around the world. Somewhere else you want us to be? Please feel free to reach out to tyler `<at>` clickhouse `<dot>` com. You can also peruse [ClickHouse Events](https://clickhouse.com/company/news-events) for a list of all upcoming trainings, meetups, speaking engagements, etc.

* [AWS Summit in DC](https://clickhouse.com/company/events/2024-06-aws-summit-dc) - Jun 26
* [ClickHouse Meetup in Amsterdam](https://www.meetup.com/clickhouse-netherlands-user-group/events/300781068/) - Jun 27
* [ClickHouse Meetup in Paris](https://www.meetup.com/clickhouse-france-user-group/events/300783448/) - Jul 9
* [ClickHouse Cloud - Live Update Call](https://clickhouse.com/company/events/202407-cloud-update-live) - Jul 9
* [ClickHouse Meetup @ Ramp - New York City](https://www.meetup.com/clickhouse-new-york-user-group/events/300595845/) - Jul 9
@@ -21,6 +21,8 @@
#include <atomic>
#include <cstddef>
#include <map>
#include <memory>
#include <unordered_map>
#include <vector>

#include "Poco/Channel.h"
@@ -19,6 +19,7 @@

#include <map>
#include <vector>
#include "Poco/Foundation.h"
#include "Poco/Timestamp.h"
@@ -84,5 +84,5 @@ if (CMAKE_CROSSCOMPILING)
        message (FATAL_ERROR "Trying to cross-compile to unsupported system: ${CMAKE_SYSTEM_NAME}!")
    endif ()

    message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILE_TARGET}")
    message (STATUS "Cross-compiling for target: ${CMAKE_CXX_COMPILER_TARGET}")
endif ()
contrib/CMakeLists.txt (vendored, 2 changes)
@@ -228,6 +228,8 @@ add_contrib (ulid-c-cmake ulid-c)

add_contrib (libssh-cmake libssh)

add_contrib (prometheus-protobufs-cmake prometheus-protobufs prometheus-protobufs-gogo)

# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
# in "contrib/..." as originally planned, so we workaround this by fixing FOLDER properties of all targets manually,
@@ -125,7 +125,7 @@ configure_file("${AWS_SDK_CORE_DIR}/include/aws/core/SDKConfig.h.in"
    "${CMAKE_CURRENT_BINARY_DIR}/include/aws/core/SDKConfig.h" @ONLY)

aws_get_version(AWS_CRT_CPP_VERSION_MAJOR AWS_CRT_CPP_VERSION_MINOR AWS_CRT_CPP_VERSION_PATCH FULL_VERSION GIT_HASH)
configure_file("${AWS_CRT_DIR}/include/aws/crt/Config.h.in" "${AWS_CRT_DIR}/include/aws/crt/Config.h" @ONLY)
configure_file("${AWS_CRT_DIR}/include/aws/crt/Config.h.in" "${CMAKE_CURRENT_BINARY_DIR}/include/aws/crt/Config.h" @ONLY)

list(APPEND AWS_SOURCES ${AWS_SDK_CORE_SRC} ${AWS_SDK_CORE_NET_SRC} ${AWS_SDK_CORE_PLATFORM_SRC})
contrib/azure (vendored, 2 changes)
@@ -1 +1 @@
Subproject commit 6262a76ef4c4c330c84e58dd4f6f13f4e6230fcd
Subproject commit 92c94d7f37a43cc8fc4d466884a95f610c0593bf
@@ -157,15 +157,13 @@ function(protobuf_generate)

  set(_generated_srcs_all)
  foreach(_proto ${protobuf_generate_PROTOS})
    get_filename_component(_abs_file ${_proto} ABSOLUTE)
    get_filename_component(_abs_dir ${_abs_file} DIRECTORY)
    get_filename_component(_basename ${_proto} NAME_WE)
    file(RELATIVE_PATH _rel_dir ${CMAKE_CURRENT_SOURCE_DIR} ${_abs_dir})

    set(_possible_rel_dir)
    if (NOT protobuf_generate_APPEND_PATH)
      set(_possible_rel_dir ${_rel_dir}/)
    endif()
    # The protobuf compiler doesn't return paths to the files it generates so we have to calculate those paths here:
    # _abs_file - absolute path to a .proto file,
    # _possible_rel_dir - relative path to the .proto file from some import directory specified in Protobuf_IMPORT_DIRS,
    # _basename - filename of the .proto file (without path and without extension).
    get_proto_absolute_path(_abs_file "${_proto}" ${_protobuf_include_path})
    get_proto_relative_path(_possible_rel_dir "${_abs_file}" ${_protobuf_include_path})
    get_filename_component(_basename "${_abs_file}" NAME_WE)

    set(_generated_srcs)
    foreach(_ext ${protobuf_generate_GENERATE_EXTENSIONS})
@@ -173,7 +171,7 @@ function(protobuf_generate)
    endforeach()

    if(protobuf_generate_DESCRIPTORS AND protobuf_generate_LANGUAGE STREQUAL cpp)
      set(_descriptor_file "${CMAKE_CURRENT_BINARY_DIR}/${_basename}.desc")
      set(_descriptor_file "${protobuf_generate_PROTOC_OUT_DIR}/${_possible_rel_dir}${_basename}.desc")
      set(_dll_desc_out "--descriptor_set_out=${_descriptor_file}")
      list(APPEND _generated_srcs ${_descriptor_file})
    endif()
@@ -196,3 +194,36 @@ function(protobuf_generate)
    target_sources(${protobuf_generate_TARGET} PRIVATE ${_generated_srcs_all})
  endif()
endfunction()

# Calculates the absolute path to a .proto file.
function(get_proto_absolute_path result proto)
  cmake_path(IS_ABSOLUTE proto _is_abs_path)
  if(_is_abs_path)
    set(${result} "${proto}" PARENT_SCOPE)
    return()
  endif()
  foreach(_include_dir ${ARGN})
    if(EXISTS "${_include_dir}/${proto}")
      set(${result} "${_include_dir}/${proto}" PARENT_SCOPE)
      return()
    endif()
  endforeach()
  message(SEND_ERROR "Not found protobuf ${proto} in Protobuf_IMPORT_DIRS: ${ARGN}")
endfunction()

# Calculates a relative path to a .proto file. The returned path is relative to one of include directories.
function(get_proto_relative_path result abs_path)
  set(${result} "" PARENT_SCOPE)
  get_filename_component(_abs_dir "${abs_path}" DIRECTORY)
  foreach(_include_dir ${ARGN})
    cmake_path(IS_PREFIX _include_dir "${_abs_dir}" _is_prefix)
    if(_is_prefix)
      file(RELATIVE_PATH _rel_dir "${_include_dir}" "${_abs_dir}")
      if(NOT _rel_dir STREQUAL "")
        set(${result} "${_rel_dir}/" PARENT_SCOPE)
      endif()
      return()
    endif()
  endforeach()
  message(WARNING "Not found protobuf ${abs_path} in Protobuf_IMPORT_DIRS: ${ARGN}")
endfunction()
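The two helpers added above resolve a .proto file against the import directories the same way protoc does: the first directory containing the file wins, and the file's directory relative to that import dir is later reused to predict where generated files land. A minimal Python sketch of the same lookup logic (the path in the final comment is hypothetical, for illustration only):

```python
from pathlib import Path

def proto_absolute_path(proto: str, import_dirs: list[str]) -> Path:
    """Mirrors get_proto_absolute_path: first import dir containing the file wins."""
    path = Path(proto)
    if path.is_absolute():
        return path
    for d in import_dirs:
        candidate = Path(d) / proto
        if candidate.exists():
            return candidate
    raise FileNotFoundError(f"{proto} not found in import dirs: {import_dirs}")

def proto_relative_dir(abs_file: Path, import_dirs: list[str]) -> str:
    """Mirrors get_proto_relative_path: file's directory relative to its import dir."""
    abs_dir = abs_file.parent
    for d in import_dirs:
        try:
            rel = abs_dir.relative_to(d)  # raises ValueError if d is not a prefix
        except ValueError:
            continue
        return "" if str(rel) == "." else f"{rel}/"
    return ""  # the CMake version only warns in this case

# e.g. proto_relative_dir(Path("/src/prompb/remote.proto"), ["/src"]) == "prompb/"
```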
@@ -5,7 +5,7 @@ else ()
endif ()

if (NOT ENABLE_ICU)
    message(STATUS "Not using icu")
    message(STATUS "Not using ICU")
    return()
endif()
@@ -34,7 +34,11 @@ if (OS_LINUX)
    # avoid spurious latencies and additional work associated with
    # MADV_DONTNEED. See
    # https://github.com/ClickHouse/ClickHouse/issues/11121 for motivation.
    set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
    if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
        set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
    else()
        set (JEMALLOC_CONFIG_MALLOC_CONF "percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
    endif()
else()
    set (JEMALLOC_CONFIG_MALLOC_CONF "oversize_threshold:0,muzzy_decay_ms:0,dirty_decay_ms:5000")
endif()
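The hunk compiles the jemalloc profiler in but leaves it inactive (prof:true, prof_active:false), and only for non-debug builds. MALLOC_CONF itself is just a flat comma-separated list of key:value options; a small Python sketch of that syntax (the helper is hypothetical, purely illustrative):

```python
def parse_malloc_conf(conf: str) -> dict[str, str]:
    """Split a jemalloc MALLOC_CONF string ("key:value,key:value,...") into a dict."""
    return dict(item.split(":", 1) for item in conf.split(",") if item)

release = ("percpu_arena:percpu,oversize_threshold:0,muzzy_decay_ms:0,"
           "dirty_decay_ms:5000,prof:true,prof_active:false,background_thread:true")
opts = parse_malloc_conf(release)
assert opts["prof"] == "true" and opts["prof_active"] == "false"  # built in, but idle
```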
contrib/orc (vendored, 2 changes)
@@ -1 +1 @@
Subproject commit 947cebaf9432d708253ac08dc3012daa6b4ede6f
Subproject commit bcc025c09828c556f54cfbdf83a66b9acae7d17f
contrib/prometheus-protobufs-cmake/CMakeLists.txt (new file, 34 lines)
@@ -0,0 +1,34 @@
option(ENABLE_PROMETHEUS_PROTOBUFS "Enable Prometheus Protobufs" ${ENABLE_PROTOBUF})

if(NOT ENABLE_PROMETHEUS_PROTOBUFS)
  message(STATUS "Not using prometheus-protobufs")
  return()
endif()

set(Protobuf_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf/src")
set(Prometheus_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/prometheus-protobufs")
set(GogoProto_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/prometheus-protobufs-gogo")

# Protobuf_IMPORT_DIRS specify where the protobuf compiler will look for .proto files.
set(Old_Protobuf_IMPORT_DIRS ${Protobuf_IMPORT_DIRS})
list(APPEND Protobuf_IMPORT_DIRS "${Protobuf_INCLUDE_DIR}" "${Prometheus_INCLUDE_DIR}" "${GogoProto_INCLUDE_DIR}")

PROTOBUF_GENERATE_CPP(prometheus_protobufs_sources prometheus_protobufs_headers
  "prompb/remote.proto"
  "prompb/types.proto"
  "gogoproto/gogo.proto"
)

set(Protobuf_IMPORT_DIRS ${Old_Protobuf_IMPORT_DIRS})

# Ignore warnings while compiling protobuf-generated *.pb.h and *.pb.cpp files.
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")

# Disable clang-tidy for protobuf-generated *.pb.h and *.pb.cpp files.
set (CMAKE_CXX_CLANG_TIDY "")

add_library(_prometheus_protobufs ${prometheus_protobufs_sources} ${prometheus_protobufs_headers})
target_include_directories(_prometheus_protobufs SYSTEM PUBLIC "${CMAKE_CURRENT_BINARY_DIR}")
target_link_libraries (_prometheus_protobufs PUBLIC ch_contrib::protobuf)

add_library (ch_contrib::prometheus_protobufs ALIAS _prometheus_protobufs)
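PROTOBUF_GENERATE_CPP resolves imports such as "gogoproto/gogo.proto" through the three directories appended to Protobuf_IMPORT_DIRS, which corresponds to passing one -I flag per directory to protoc. A sketch of the equivalent protoc call from Python (the output directory is hypothetical; the real one is chosen by CMake):

```python
import subprocess

import_dirs = [
    "contrib/google-protobuf/src",
    "contrib/prometheus-protobufs",
    "contrib/prometheus-protobufs-gogo",
]
protos = ["prompb/remote.proto", "prompb/types.proto", "gogoproto/gogo.proto"]

# One -I per Protobuf_IMPORT_DIRS entry; --cpp_out picks where *.pb.{h,cc} land.
cmd = ["protoc", "--cpp_out=build/prometheus-protobufs"]
cmd += [f"-I{d}" for d in import_dirs]
subprocess.check_call(cmd + protos)
```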
contrib/prometheus-protobufs-gogo/LICENSE (new file, 35 lines)
@@ -0,0 +1,35 @@
Copyright (c) 2022, The Cosmos SDK Authors. All rights reserved.
Copyright (c) 2013, The GoGo Authors. All rights reserved.

Protocol Buffers for Go with Gadgets

Go support for Protocol Buffers - Google's data interchange format

Copyright 2010 The Go Authors. All rights reserved.
https://github.com/golang/protobuf

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
contrib/prometheus-protobufs-gogo/README (new file, 4 lines)
@@ -0,0 +1,4 @@
File "gogoproto/gogo.proto" was downloaded from the "Protocol Buffers for Go with Gadgets" project:
https://github.com/cosmos/gogoproto/blob/main/gogoproto/gogo.proto

File "gogoproto/gogo.proto" is used in ClickHouse to compile prometheus protobufs.
contrib/prometheus-protobufs-gogo/gogoproto/gogo.proto (new file, 145 lines)
@@ -0,0 +1,145 @@
// Protocol Buffers for Go with Gadgets
//
// Copyright (c) 2013, The GoGo Authors. All rights reserved.
// http://github.com/cosmos/gogoproto
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

syntax = "proto2";
package gogoproto;

import "google/protobuf/descriptor.proto";

option java_package = "com.google.protobuf";
option java_outer_classname = "GoGoProtos";
option go_package = "github.com/cosmos/gogoproto/gogoproto";

extend google.protobuf.EnumOptions {
  optional bool goproto_enum_prefix = 62001;
  optional bool goproto_enum_stringer = 62021;
  optional bool enum_stringer = 62022;
  optional string enum_customname = 62023;
  optional bool enumdecl = 62024;
}

extend google.protobuf.EnumValueOptions {
  optional string enumvalue_customname = 66001;
}

extend google.protobuf.FileOptions {
  optional bool goproto_getters_all = 63001;
  optional bool goproto_enum_prefix_all = 63002;
  optional bool goproto_stringer_all = 63003;
  optional bool verbose_equal_all = 63004;
  optional bool face_all = 63005;
  optional bool gostring_all = 63006;
  optional bool populate_all = 63007;
  optional bool stringer_all = 63008;
  optional bool onlyone_all = 63009;

  optional bool equal_all = 63013;
  optional bool description_all = 63014;
  optional bool testgen_all = 63015;
  optional bool benchgen_all = 63016;
  optional bool marshaler_all = 63017;
  optional bool unmarshaler_all = 63018;
  optional bool stable_marshaler_all = 63019;

  optional bool sizer_all = 63020;

  optional bool goproto_enum_stringer_all = 63021;
  optional bool enum_stringer_all = 63022;

  optional bool unsafe_marshaler_all = 63023;
  optional bool unsafe_unmarshaler_all = 63024;

  optional bool goproto_extensions_map_all = 63025;
  optional bool goproto_unrecognized_all = 63026;
  optional bool gogoproto_import = 63027;
  optional bool protosizer_all = 63028;
  optional bool compare_all = 63029;
  optional bool typedecl_all = 63030;
  optional bool enumdecl_all = 63031;

  optional bool goproto_registration = 63032;
  optional bool messagename_all = 63033;

  optional bool goproto_sizecache_all = 63034;
  optional bool goproto_unkeyed_all = 63035;
}

extend google.protobuf.MessageOptions {
  optional bool goproto_getters = 64001;
  optional bool goproto_stringer = 64003;
  optional bool verbose_equal = 64004;
  optional bool face = 64005;
  optional bool gostring = 64006;
  optional bool populate = 64007;
  optional bool stringer = 67008;
  optional bool onlyone = 64009;

  optional bool equal = 64013;
  optional bool description = 64014;
  optional bool testgen = 64015;
  optional bool benchgen = 64016;
  optional bool marshaler = 64017;
  optional bool unmarshaler = 64018;
  optional bool stable_marshaler = 64019;

  optional bool sizer = 64020;

  optional bool unsafe_marshaler = 64023;
  optional bool unsafe_unmarshaler = 64024;

  optional bool goproto_extensions_map = 64025;
  optional bool goproto_unrecognized = 64026;

  optional bool protosizer = 64028;
  optional bool compare = 64029;

  optional bool typedecl = 64030;

  optional bool messagename = 64033;

  optional bool goproto_sizecache = 64034;
  optional bool goproto_unkeyed = 64035;
}

extend google.protobuf.FieldOptions {
  optional bool nullable = 65001;
  optional bool embed = 65002;
  optional string customtype = 65003;
  optional string customname = 65004;
  optional string jsontag = 65005;
  optional string moretags = 65006;
  optional string casttype = 65007;
  optional string castkey = 65008;
  optional string castvalue = 65009;

  optional bool stdtime = 65010;
  optional bool stdduration = 65011;
  optional bool wktpointer = 65012;

  optional string castrepeated = 65013;
}
contrib/prometheus-protobufs/LICENSE (new file, 201 lines)
@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
contrib/prometheus-protobufs/README (new file, 2 lines)
@@ -0,0 +1,2 @@
Files "prompb/remote.proto" and "prompb/types.proto" were downloaded from the Prometheus repository:
https://github.com/prometheus/prometheus/tree/main/prompb
contrib/prometheus-protobufs/prompb/remote.proto (new file, 88 lines)
@@ -0,0 +1,88 @@
// Copyright 2016 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

import "prompb/types.proto";
import "gogoproto/gogo.proto";

message WriteRequest {
  repeated prometheus.TimeSeries timeseries = 1 [(gogoproto.nullable) = false];
  // Cortex uses this field to determine the source of the write request.
  // We reserve it to avoid any compatibility issues.
  reserved 2;
  repeated prometheus.MetricMetadata metadata = 3 [(gogoproto.nullable) = false];
}

// ReadRequest represents a remote read request.
message ReadRequest {
  repeated Query queries = 1;

  enum ResponseType {
    // Server will return a single ReadResponse message with matched series that includes list of raw samples.
    // It's recommended to use streamed response types instead.
    //
    // Response headers:
    // Content-Type: "application/x-protobuf"
    // Content-Encoding: "snappy"
    SAMPLES = 0;
    // Server will stream a delimited ChunkedReadResponse message that
    // contains XOR or HISTOGRAM(!) encoded chunks for a single series.
    // Each message is following varint size and fixed size bigendian
    // uint32 for CRC32 Castagnoli checksum.
    //
    // Response headers:
    // Content-Type: "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse"
    // Content-Encoding: ""
    STREAMED_XOR_CHUNKS = 1;
  }

  // accepted_response_types allows negotiating the content type of the response.
  //
  // Response types are taken from the list in the FIFO order. If no response type in `accepted_response_types` is
  // implemented by server, error is returned.
  // For request that do not contain `accepted_response_types` field the SAMPLES response type will be used.
  repeated ResponseType accepted_response_types = 2;
}

// ReadResponse is a response when response_type equals SAMPLES.
message ReadResponse {
  // In same order as the request's queries.
  repeated QueryResult results = 1;
}

message Query {
  int64 start_timestamp_ms = 1;
  int64 end_timestamp_ms = 2;
  repeated prometheus.LabelMatcher matchers = 3;
  prometheus.ReadHints hints = 4;
}

message QueryResult {
  // Samples within a time series must be ordered by time.
  repeated prometheus.TimeSeries timeseries = 1;
}

// ChunkedReadResponse is a response when response_type equals STREAMED_XOR_CHUNKS.
// We strictly stream full series after series, optionally split by time. This means that a single frame can contain
// partition of the single series, but once a new series is started to be streamed it means that no more chunks will
// be sent for previous one. Series are returned sorted in the same way TSDB block are internally.
message ChunkedReadResponse {
  repeated prometheus.ChunkedSeries chunked_series = 1;

  // query_index represents an index of the query from ReadRequest.queries these chunks relates to.
  int64 query_index = 2;
}
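For orientation, this is what a remote-write payload built from these messages looks like from Python. The sketch assumes the two .proto files above were compiled with protoc into a prompb package and that python-snappy is installed; neither assumption is part of this commit:

```python
import snappy                   # python-snappy, assumed installed
from prompb import remote_pb2   # assumed output of protoc --python_out

req = remote_pb2.WriteRequest()
ts = req.timeseries.add()
ts.labels.add(name="__name__", value="http_requests_total")
ts.samples.add(value=42.0, timestamp=1720000000000)  # milliseconds since epoch

# Prometheus remote write sends a snappy-compressed protobuf body
# (Content-Type: application/x-protobuf, Content-Encoding: snappy).
body = snappy.compress(req.SerializeToString())
```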
contrib/prometheus-protobufs/prompb/types.proto (new file, 187 lines)
@@ -0,0 +1,187 @@
// Copyright 2017 Prometheus Team
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

syntax = "proto3";
package prometheus;

option go_package = "prompb";

import "gogoproto/gogo.proto";

message MetricMetadata {
  enum MetricType {
    UNKNOWN = 0;
    COUNTER = 1;
    GAUGE = 2;
    HISTOGRAM = 3;
    GAUGEHISTOGRAM = 4;
    SUMMARY = 5;
    INFO = 6;
    STATESET = 7;
  }

  // Represents the metric type, these match the set from Prometheus.
  // Refer to github.com/prometheus/common/model/metadata.go for details.
  MetricType type = 1;
  string metric_family_name = 2;
  string help = 4;
  string unit = 5;
}

message Sample {
  double value = 1;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 2;
}

message Exemplar {
  // Optional, can be empty.
  repeated Label labels = 1 [(gogoproto.nullable) = false];
  double value = 2;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 3;
}

// A native histogram, also known as a sparse histogram.
// Original design doc:
// https://docs.google.com/document/d/1cLNv3aufPZb3fNfaJgdaRBZsInZKKIHo9E6HinJVbpM/edit
// The appendix of this design doc also explains the concept of float
// histograms. This Histogram message can represent both, the usual
// integer histogram as well as a float histogram.
message Histogram {
  enum ResetHint {
    UNKNOWN = 0; // Need to test for a counter reset explicitly.
    YES = 1;     // This is the 1st histogram after a counter reset.
    NO = 2;      // There was no counter reset between this and the previous Histogram.
    GAUGE = 3;   // This is a gauge histogram where counter resets don't happen.
  }

  oneof count { // Count of observations in the histogram.
    uint64 count_int = 1;
    double count_float = 2;
  }
  double sum = 3; // Sum of observations in the histogram.
  // The schema defines the bucket schema. Currently, valid numbers
  // are -4 <= n <= 8. They are all for base-2 bucket schemas, where 1
  // is a bucket boundary in each case, and then each power of two is
  // divided into 2^n logarithmic buckets. Or in other words, each
  // bucket boundary is the previous boundary times 2^(2^-n). In the
  // future, more bucket schemas may be added using numbers < -4 or >
  // 8.
  sint32 schema = 4;
  double zero_threshold = 5; // Breadth of the zero bucket.
  oneof zero_count { // Count in zero bucket.
    uint64 zero_count_int = 6;
    double zero_count_float = 7;
  }

  // Negative Buckets.
  repeated BucketSpan negative_spans = 8 [(gogoproto.nullable) = false];
  // Use either "negative_deltas" or "negative_counts", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 negative_deltas = 9; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double negative_counts = 10; // Absolute count of each bucket.

  // Positive Buckets.
  repeated BucketSpan positive_spans = 11 [(gogoproto.nullable) = false];
  // Use either "positive_deltas" or "positive_counts", the former for
  // regular histograms with integer counts, the latter for float
  // histograms.
  repeated sint64 positive_deltas = 12; // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
  repeated double positive_counts = 13; // Absolute count of each bucket.

  ResetHint reset_hint = 14;
  // timestamp is in ms format, see model/timestamp/timestamp.go for
  // conversion from time.Time to Prometheus timestamp.
  int64 timestamp = 15;
}

// A BucketSpan defines a number of consecutive buckets with their
// offset. Logically, it would be more straightforward to include the
// bucket counts in the Span. However, the protobuf representation is
// more compact in the way the data is structured here (with all the
// buckets in a single array separate from the Spans).
message BucketSpan {
  sint32 offset = 1; // Gap to previous span, or starting point for 1st span (which can be negative).
  uint32 length = 2; // Length of consecutive buckets.
}

// TimeSeries represents samples and labels for a single time series.
message TimeSeries {
  // For a timeseries to be valid, and for the samples and exemplars
  // to be ingested by the remote system properly, the labels field is required.
  repeated Label labels = 1 [(gogoproto.nullable) = false];
  repeated Sample samples = 2 [(gogoproto.nullable) = false];
  repeated Exemplar exemplars = 3 [(gogoproto.nullable) = false];
  repeated Histogram histograms = 4 [(gogoproto.nullable) = false];
}

message Label {
  string name = 1;
  string value = 2;
}

message Labels {
  repeated Label labels = 1 [(gogoproto.nullable) = false];
}

// Matcher specifies a rule, which can match or set of labels or not.
message LabelMatcher {
  enum Type {
    EQ = 0;
    NEQ = 1;
    RE = 2;
    NRE = 3;
  }
  Type type = 1;
  string name = 2;
  string value = 3;
}

message ReadHints {
  int64 step_ms = 1;            // Query step size in milliseconds.
  string func = 2;              // String representation of surrounding function or aggregation.
  int64 start_ms = 3;           // Start time in milliseconds.
  int64 end_ms = 4;             // End time in milliseconds.
  repeated string grouping = 5; // List of label names used in aggregation.
  bool by = 6;                  // Indicate whether it is without or by.
  int64 range_ms = 7;           // Range vector selector range in milliseconds.
}

// Chunk represents a TSDB chunk.
// Time range [min, max] is inclusive.
message Chunk {
  int64 min_time_ms = 1;
  int64 max_time_ms = 2;

  // We require this to match chunkenc.Encoding.
  enum Encoding {
    UNKNOWN = 0;
    XOR = 1;
    HISTOGRAM = 2;
    FLOAT_HISTOGRAM = 3;
  }
  Encoding type = 3;
  bytes data = 4;
}

// ChunkedSeries represents single, encoded time series.
message ChunkedSeries {
  // Labels should be sorted.
  repeated Label labels = 1 [(gogoproto.nullable) = false];
  // Chunks will be in start time order and may overlap.
  repeated Chunk chunks = 2 [(gogoproto.nullable) = false];
}
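The schema comment in Histogram above amounts to a simple formula: for schema n, the upper boundary of positive bucket i is 2^(i * 2^-n), so each boundary is the previous one times 2^(2^-n). A quick Python check of that relationship (the helper name is our own, for illustration):

```python
def bucket_upper_bound(index: int, schema: int) -> float:
    """Upper boundary of positive bucket `index` for a base-2 native-histogram schema."""
    return 2.0 ** (index * 2.0 ** -schema)

# For schema 3, each power of two is divided into 2^3 = 8 logarithmic buckets:
growth = bucket_upper_bound(1, 3) / bucket_upper_bound(0, 3)
assert abs(growth - 2 ** (2 ** -3)) < 1e-12  # boundary = previous * 2^(2^-n)
```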
contrib/s2geometry (vendored, 2 changes)
@@ -1 +1 @@
Subproject commit 0547c38371777a1c1c8be263a6f05c3bf71bb05b
Subproject commit 6522a40338d58752c2a4227a3fc2bc4107c73e43
@@ -1,7 +1,7 @@
option(ENABLE_S2_GEOMETRY "Enable S2 geometry library" ${ENABLE_LIBRARIES})
option(ENABLE_S2_GEOMETRY "Enable S2 Geometry" ${ENABLE_LIBRARIES})

if (NOT ENABLE_S2_GEOMETRY)
    message(STATUS "Not using S2 geometry")
    message(STATUS "Not using S2 Geometry")
    return()
endif()
@@ -38,6 +38,7 @@ set(S2_SRCS
    "${S2_SOURCE_DIR}/s2/s2cell_index.cc"
    "${S2_SOURCE_DIR}/s2/s2cell_union.cc"
    "${S2_SOURCE_DIR}/s2/s2centroids.cc"
    "${S2_SOURCE_DIR}/s2/s2chain_interpolation_query.cc"
    "${S2_SOURCE_DIR}/s2/s2closest_cell_query.cc"
    "${S2_SOURCE_DIR}/s2/s2closest_edge_query.cc"
    "${S2_SOURCE_DIR}/s2/s2closest_point_query.cc"
@@ -46,6 +47,7 @@ set(S2_SRCS
    "${S2_SOURCE_DIR}/s2/s2coords.cc"
    "${S2_SOURCE_DIR}/s2/s2crossing_edge_query.cc"
    "${S2_SOURCE_DIR}/s2/s2debug.cc"
    "${S2_SOURCE_DIR}/s2/s2density_tree.cc"
    "${S2_SOURCE_DIR}/s2/s2earth.cc"
    "${S2_SOURCE_DIR}/s2/s2edge_clipping.cc"
    "${S2_SOURCE_DIR}/s2/s2edge_crosser.cc"
@@ -53,8 +55,10 @@ set(S2_SRCS
    "${S2_SOURCE_DIR}/s2/s2edge_distances.cc"
    "${S2_SOURCE_DIR}/s2/s2edge_tessellator.cc"
    "${S2_SOURCE_DIR}/s2/s2error.cc"
    "${S2_SOURCE_DIR}/s2/s2fractal.cc"
    "${S2_SOURCE_DIR}/s2/s2furthest_edge_query.cc"
    "${S2_SOURCE_DIR}/s2/s2hausdorff_distance_query.cc"
    "${S2_SOURCE_DIR}/s2/s2index_cell_data.cc"
    "${S2_SOURCE_DIR}/s2/s2latlng.cc"
    "${S2_SOURCE_DIR}/s2/s2latlng_rect.cc"
    "${S2_SOURCE_DIR}/s2/s2latlng_rect_bounder.cc"
@@ -63,10 +67,10 @@ set(S2_SRCS
    "${S2_SOURCE_DIR}/s2/s2lax_polyline_shape.cc"
    "${S2_SOURCE_DIR}/s2/s2loop.cc"
    "${S2_SOURCE_DIR}/s2/s2loop_measures.cc"
    "${S2_SOURCE_DIR}/s2/s2max_distance_targets.cc"
    "${S2_SOURCE_DIR}/s2/s2measures.cc"
    "${S2_SOURCE_DIR}/s2/s2memory_tracker.cc"
    "${S2_SOURCE_DIR}/s2/s2metrics.cc"
    "${S2_SOURCE_DIR}/s2/s2max_distance_targets.cc"
    "${S2_SOURCE_DIR}/s2/s2min_distance_targets.cc"
    "${S2_SOURCE_DIR}/s2/s2padded_cell.cc"
    "${S2_SOURCE_DIR}/s2/s2point_compression.cc"
@@ -80,10 +84,11 @@ set(S2_SRCS
    "${S2_SOURCE_DIR}/s2/s2predicates.cc"
    "${S2_SOURCE_DIR}/s2/s2projections.cc"
    "${S2_SOURCE_DIR}/s2/s2r2rect.cc"
    "${S2_SOURCE_DIR}/s2/s2region.cc"
    "${S2_SOURCE_DIR}/s2/s2region_term_indexer.cc"
    "${S2_SOURCE_DIR}/s2/s2random.cc"
    "${S2_SOURCE_DIR}/s2/s2region_coverer.cc"
    "${S2_SOURCE_DIR}/s2/s2region_intersection.cc"
    "${S2_SOURCE_DIR}/s2/s2region_sharder.cc"
    "${S2_SOURCE_DIR}/s2/s2region_term_indexer.cc"
    "${S2_SOURCE_DIR}/s2/s2region_union.cc"
    "${S2_SOURCE_DIR}/s2/s2shape_index.cc"
    "${S2_SOURCE_DIR}/s2/s2shape_index_buffered_region.cc"
@@ -94,9 +99,12 @@ set(S2_SRCS
    "${S2_SOURCE_DIR}/s2/s2shapeutil_coding.cc"
    "${S2_SOURCE_DIR}/s2/s2shapeutil_contains_brute_force.cc"
    "${S2_SOURCE_DIR}/s2/s2shapeutil_conversion.cc"
    "${S2_SOURCE_DIR}/s2/s2shapeutil_count_vertices.cc"
    "${S2_SOURCE_DIR}/s2/s2shapeutil_edge_iterator.cc"
    "${S2_SOURCE_DIR}/s2/s2shapeutil_edge_wrap.cc"
    "${S2_SOURCE_DIR}/s2/s2shapeutil_get_reference_point.cc"
    "${S2_SOURCE_DIR}/s2/s2shapeutil_visit_crossing_edge_pairs.cc"
    "${S2_SOURCE_DIR}/s2/s2testing.cc"
    "${S2_SOURCE_DIR}/s2/s2text_format.cc"
    "${S2_SOURCE_DIR}/s2/s2wedge_relations.cc"
    "${S2_SOURCE_DIR}/s2/s2winding_operation.cc"
@@ -140,6 +148,7 @@ target_link_libraries(_s2 PRIVATE
    absl::strings
    absl::type_traits
    absl::utility
    absl::vlog_is_on
)

target_include_directories(_s2 SYSTEM BEFORE PUBLIC "${S2_SOURCE_DIR}/")
contrib/vectorscan (vendored, 2 changes)
@@ -1 +1 @@
Subproject commit 4918f81ea3d1abd18905bac9876d4a1fe2ebdf07
Subproject commit d29730e1cb9daaa66bda63426cdce83505d2c809
@@ -1,11 +1,8 @@
# We use vectorscan, a portable and API/ABI-compatible drop-in replacement for hyperscan.

# Vectorscan is drop-in replacement for Hyperscan.
if ((ARCH_AMD64 AND NOT NO_SSE3_OR_HIGHER) OR ARCH_AARCH64)
    option (ENABLE_VECTORSCAN "Enable vectorscan library" ${ENABLE_LIBRARIES})
    option (ENABLE_VECTORSCAN "Enable vectorscan" ${ENABLE_LIBRARIES})
endif()

# TODO PPC should generally work but needs manual generation of ppc/config.h file on a PPC machine

if (NOT ENABLE_VECTORSCAN)
    message (STATUS "Not using vectorscan")
    return()
@@ -272,34 +269,24 @@ if (ARCH_AARCH64)
    )
endif()

# TODO
# if (ARCH_PPC64LE)
#     list(APPEND SRCS
#         "${LIBRARY_DIR}/src/util/supervector/arch/ppc64el/impl.cpp"
#     )
# endif()

add_library (_vectorscan ${SRCS})

target_compile_options (_vectorscan PRIVATE
    -fno-sanitize=undefined # assume the library takes care of itself
    -O2 -fno-strict-aliasing -fno-omit-frame-pointer -fvisibility=hidden # options from original build system
)
# library has too much debug information
if (OMIT_HEAVY_DEBUG_SYMBOLS)
    target_compile_options (_vectorscan PRIVATE -g0)
endif()

# Include version header manually generated by running the original build system
target_include_directories (_vectorscan SYSTEM PRIVATE common)
target_include_directories (_vectorscan SYSTEM PUBLIC "${LIBRARY_DIR}/src")

# Makes the version header visible. It was generated by running the native build system manually.
# Please update whenever you update vectorscan.
target_include_directories (_vectorscan SYSTEM PUBLIC common)

# vectorscan inherited some patched in-source versions of boost headers to fix a bug in
# boost 1.69. This bug has been solved long ago but vectorscan's source code still
# points to the patched versions, so include it here.
target_include_directories (_vectorscan SYSTEM PRIVATE "${LIBRARY_DIR}/include")

target_include_directories (_vectorscan SYSTEM PUBLIC "${LIBRARY_DIR}/src")

# Include platform-specific config header generated by manually running the original build system
# Please regenerate these files if you update vectorscan.
@@ -32,8 +32,12 @@
/**
 * A version string to identify this release of Hyperscan.
 */
#define HS_VERSION_STRING "5.4.7 2022-06-20"
#define HS_VERSION_STRING "5.4.11 2024-07-04"

#define HS_VERSION_32BIT ((5 << 24) | (1 << 16) | (7 << 8) | 0)

#define HS_MAJOR 5
#define HS_MINOR 4
#define HS_PATCH 11

#endif /* HS_VERSION_H_C6428FAF8E3713 */
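HS_VERSION_32BIT packs one byte each of major, minor, and patch into a single word: (major << 24) | (minor << 16) | (patch << 8). A quick Python check of that layout; note that the literal shown in this hunk, (5 << 24) | (1 << 16) | (7 << 8), decodes to 5.1.7 rather than the 5.4.11 declared below it, so it appears to be an unchanged context line:

```python
def hs_version_32bit(major: int, minor: int, patch: int) -> int:
    """Pack a Hyperscan/vectorscan version the way HS_VERSION_32BIT does."""
    return (major << 24) | (minor << 16) | (patch << 8)

def decode(word: int) -> tuple[int, int, int]:
    return (word >> 24) & 0xFF, (word >> 16) & 0xFF, (word >> 8) & 0xFF

assert decode(hs_version_32bit(5, 4, 11)) == (5, 4, 11)
assert decode((5 << 24) | (1 << 16) | (7 << 8) | 0) == (5, 1, 7)  # literal in the hunk
```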
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.6.1.4423"
ARG VERSION="24.6.2.17"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""
docker/reqgenerator.py (new file, 47 lines)
@@ -0,0 +1,47 @@
#!/usr/bin/env python3
# To run this script you must install docker and the pipdeptree python package
#

import subprocess
import os
import sys


def build_docker_deps(image_name, imagedir):
    cmd = f"""docker run --entrypoint "/bin/bash" {image_name} -c "pip install pipdeptree 2>/dev/null 1>/dev/null && pipdeptree --freeze --warn silence | sed 's/ \+//g' | sort | uniq" > {imagedir}/requirements.txt"""
    subprocess.check_call(cmd, shell=True)


def check_docker_file_install_with_pip(filepath):
    image_name = None
    with open(filepath, "r") as f:
        for line in f:
            if "docker build" in line:
                arr = line.split(" ")
                if len(arr) > 4:
                    image_name = arr[4]
            if "pip3 install" in line or "pip install" in line:
                return image_name, True
    return image_name, False


def process_affected_images(images_dir):
    for root, _dirs, files in os.walk(images_dir):
        for f in files:
            if f == "Dockerfile":
                docker_file_path = os.path.join(root, f)
                print("Checking image on path", docker_file_path)
                image_name, has_pip = check_docker_file_install_with_pip(
                    docker_file_path
                )
                if has_pip:
                    print("Found pip in", image_name)
                    try:
                        build_docker_deps(image_name, root)
                    except Exception as ex:
                        print(ex)
                else:
                    print("Pip not found in", docker_file_path)


process_affected_images(sys.argv[1])
@@ -32,7 +32,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.6.1.4423"
ARG VERSION="24.6.2.17"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list

ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.6.1.4423"
ARG VERSION="24.6.2.17"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"

#docker-official-library:off
@@ -19,10 +19,7 @@ RUN apt-get update \
        odbcinst \
        psmisc \
        python3 \
        python3-lxml \
        python3-pip \
        python3-requests \
        python3-termcolor \
        unixodbc \
        pv \
        jq \
@@ -31,7 +28,8 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt

# This symlink is required by gcc to find the lld linker
RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
@@ -39,6 +37,10 @@ RUN ln -s /usr/bin/lld-${LLVM_VERSION} /usr/bin/ld.lld
# https://salsa.debian.org/pkg-llvm-team/llvm-toolchain/-/commit/992e52c0b156a5ba9c6a8a54f8c4857ddd3d371d
RUN sed -i '/_IMPORT_CHECK_FILES_FOR_\(mlir-\|llvm-bolt\|merge-fdata\|MLIR\)/ {s|^|#|}' /usr/lib/llvm-${LLVM_VERSION}/lib/cmake/llvm/LLVMExports-*.cmake

# LLVM changes paths for compiler-rt libraries. For some reason clang-18.1.8 cannot pick up libraries from the default install path.
# It's a very dirty workaround; better to build the compiler and LLVM ourselves and use that. Details: https://github.com/llvm/llvm-project/issues/95792
RUN test ! -d /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu || ln -s /usr/lib/llvm-18/lib/clang/18/lib/x86_64-pc-linux-gnu /usr/lib/llvm-18/lib/clang/18/lib/x86_64-unknown-linux-gnu

ARG CCACHE_VERSION=4.6.1
RUN mkdir /tmp/ccache \
    && cd /tmp/ccache \
docker/test/fasttest/requirements.txt (new file, 41 lines)
@@ -0,0 +1,41 @@
Jinja2==3.1.3
MarkupSafe==2.1.5
PyJWT==2.3.0
PyYAML==6.0.1
Pygments==2.11.2
SecretStorage==3.3.1
blinker==1.4
certifi==2020.6.20
chardet==4.0.0
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
idna==3.3
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
lxml==4.8.0
more-itertools==8.10.0
numpy==1.26.3
oauthlib==3.2.0
packaging==24.1
pandas==1.5.3
pip==24.1.1
pipdeptree==2.23.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
python-dateutil==2.9.0.post0
pytz==2024.1
requests==2.32.3
scipy==1.12.0
setuptools==59.6.0
six==1.16.0
termcolor==1.1.0
urllib3==1.26.5
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0
@@ -84,6 +84,8 @@ function start_server
    echo "ClickHouse server pid '$server_pid' started and responded"
}

export -f start_server

function clone_root
{
    [ "$UID" -eq 0 ] && git config --global --add safe.directory "$FASTTEST_SOURCE"
@@ -254,6 +256,19 @@ function configure
    rm -f "$FASTTEST_DATA/config.d/secure_ports.xml"
}

function timeout_with_logging() {
    local exit_code=0

    timeout -s TERM --preserve-status "${@}" || exit_code="${?}"

    if [[ "${exit_code}" -eq "124" ]]
    then
        echo "The command 'timeout ${*}' has been killed by timeout"
    fi

    return $exit_code
}

function run_tests
{
    clickhouse-server --version
@@ -292,6 +307,8 @@ function run_tests
    clickhouse stop --pid-path "$FASTTEST_DATA"
}

export -f run_tests

case "$stage" in
"")
    ls -la
@@ -315,7 +332,7 @@ case "$stage" in
    configure 2>&1 | ts '%Y-%m-%d %H:%M:%S' | tee "$FASTTEST_OUTPUT/install_log.txt"
    ;&
"run_tests")
    run_tests
    timeout_with_logging 35m bash -c run_tests ||:
    /process_functional_tests_result.py --in-results-dir "$FASTTEST_OUTPUT/" \
        --out-results-file "$FASTTEST_OUTPUT/test_results.tsv" \
        --out-status-file "$FASTTEST_OUTPUT/check_status.tsv" || echo -e "failure\tCannot parse results" > "$FASTTEST_OUTPUT/check_status.tsv"
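The new timeout_with_logging wrapper leans on GNU timeout: --preserve-status propagates the command's own exit code, and 124 is timeout's conventional "the limit fired" code. The same pattern in Python, as a rough sketch (the 124 convention is borrowed from GNU timeout, not something subprocess defines):

```python
import subprocess

def timeout_with_logging(limit_s: float, *cmd: str) -> int:
    """Run cmd with a wall-clock limit and log if the limit killed it."""
    try:
        return subprocess.run(list(cmd), timeout=limit_s).returncode
    except subprocess.TimeoutExpired:
        print(f"The command '{' '.join(cmd)}' has been killed by timeout")
        return 124  # mirror GNU timeout's exit code for a fired timeout

# e.g. timeout_with_logging(35 * 60, "bash", "-c", "run_tests")
```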
@@ -31,7 +31,8 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install Jinja2
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt

COPY * /
docker/test/fuzzer/requirements.txt (new file, 27 lines)
@@ -0,0 +1,27 @@
blinker==1.4
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
Jinja2==3.1.4
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
MarkupSafe==2.1.5
more-itertools==8.10.0
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0
@ -33,7 +33,8 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install pycurl
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r requirements.txt && rm -rf /root/.cache/pip

# Architecture of the image when BuildKit/buildx is used
ARG TARGETARCH
26
docker/test/integration/base/requirements.txt
Normal file
@ -0,0 +1,26 @@
blinker==1.4
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
more-itertools==8.10.0
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
pycurl==7.45.3
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0
@ -2,4 +2,5 @@
# Helper docker container to run python bottle apps

FROM python:3
RUN python -m pip install bottle
COPY requirements.txt /
RUN python -m pip install --no-cache-dir -r requirements.txt
6
docker/test/integration/resolver/requirements.txt
Normal file
@ -0,0 +1,6 @@
bottle==0.12.25
packaging==24.1
pip==23.2.1
pipdeptree==2.23.0
setuptools==69.0.3
wheel==0.42.0
@ -26,7 +26,6 @@ RUN apt-get update \
    libicu-dev \
    bsdutils \
    curl \
    python3-pika \
    liblua5.1-dev \
    luajit \
    libssl-dev \
@ -61,49 +60,8 @@ RUN curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - \

# kazoo 2.10.0 is broken
# https://s3.amazonaws.com/clickhouse-test-reports/59337/524625a1d2f4cc608a3f1059e3df2c30f353a649/integration_tests__asan__analyzer__[5_6].html
RUN python3 -m pip install --no-cache-dir \
    PyMySQL==1.1.0 \
    asyncio==3.4.3 \
    avro==1.10.2 \
    azure-storage-blob==12.19.0 \
    boto3==1.34.24 \
    cassandra-driver==3.29.0 \
    confluent-kafka==2.3.0 \
    delta-spark==2.3.0 \
    dict2xml==1.7.4 \
    dicttoxml==1.7.16 \
    docker==6.1.3 \
    docker-compose==1.29.2 \
    grpcio==1.60.0 \
    grpcio-tools==1.60.0 \
    kafka-python==2.0.2 \
    lz4==4.3.3 \
    minio==7.2.3 \
    nats-py==2.6.0 \
    protobuf==4.25.2 \
    kazoo==2.9.0 \
    psycopg2-binary==2.9.6 \
    pyhdfs==0.3.1 \
    pymongo==3.11.0 \
    pyspark==3.3.2 \
    pytest==7.4.4 \
    pytest-order==1.0.0 \
    pytest-random==0.2 \
    pytest-repeat==0.9.3 \
    pytest-timeout==2.2.0 \
    pytest-xdist==3.5.0 \
    pytest-reportlog==0.4.0 \
    pytz==2023.3.post1 \
    pyyaml==5.3.1 \
    redis==5.0.1 \
    requests-kerberos==0.14.0 \
    tzlocal==2.1 \
    retry==0.9.2 \
    bs4==0.0.2 \
    lxml==5.1.0 \
    urllib3==2.0.7 \
    jwcrypto==1.5.6
# bs4, lxml are for cloud tests, do not delete
COPY requirements.txt /
RUN python3 -m pip install --no-cache-dir -r requirements.txt

# Hudi supports only spark 3.3.*, not 3.4
RUN curl -fsSL -O https://archive.apache.org/dist/spark/spark-3.3.2/spark-3.3.2-bin-hadoop3.tgz \
113
docker/test/integration/runner/requirements.txt
Normal file
@ -0,0 +1,113 @@
PyHDFS==0.3.1
PyJWT==2.3.0
PyMySQL==1.1.0
PyNaCl==1.5.0
PyYAML==5.3.1
SecretStorage==3.3.1
argon2-cffi-bindings==21.2.0
argon2-cffi==23.1.0
async-timeout==4.0.3
asyncio==3.4.3
attrs==23.2.0
avro==1.10.2
azure-core==1.30.1
azure-storage-blob==12.19.0
bcrypt==4.1.3
beautifulsoup4==4.12.3
blinker==1.4
boto3==1.34.24
botocore==1.34.101
bs4==0.0.2
cassandra-driver==3.29.0
certifi==2024.2.2
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
confluent-kafka==2.3.0
cryptography==3.4.8
dbus-python==1.2.18
decorator==5.1.1
delta-spark==2.3.0
dict2xml==1.7.4
dicttoxml==1.7.16
distro-info==1.1+ubuntu0.2
distro==1.7.0
docker-compose==1.29.2
docker==6.1.3
dockerpty==0.4.1
docopt==0.6.2
exceptiongroup==1.2.1
execnet==2.1.1
geomet==0.2.1.post1
grpcio-tools==1.60.0
grpcio==1.60.0
gssapi==1.8.3
httplib2==0.20.2
idna==3.7
importlib-metadata==4.6.4
iniconfig==2.0.0
isodate==0.6.1
jeepney==0.7.1
jmespath==1.0.1
jsonschema==3.2.0
jwcrypto==1.5.6
kafka-python==2.0.2
kazoo==2.9.0
keyring==23.5.0
krb5==0.5.1
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
lxml==5.1.0
lz4==4.3.3
minio==7.2.3
more-itertools==8.10.0
nats-py==2.6.0
oauthlib==3.2.0
packaging==24.0
paramiko==3.4.0
pika==1.2.0
pip==24.1.1
pipdeptree==2.23.0
pluggy==1.5.0
protobuf==4.25.2
psycopg2-binary==2.9.6
py4j==0.10.9.5
py==1.11.0
pycparser==2.22
pycryptodome==3.20.0
pymongo==3.11.0
pyparsing==2.4.7
pyrsistent==0.20.0
pyspark==3.3.2
pyspnego==0.10.2
pytest-order==1.0.0
pytest-random==0.2
pytest-repeat==0.9.3
pytest-reportlog==0.4.0
pytest-timeout==2.2.0
pytest-xdist==3.5.0
pytest==7.4.4
python-apt==2.4.0+ubuntu3
python-dateutil==2.9.0.post0
python-dotenv==0.21.1
pytz==2023.3.post1
redis==5.0.1
requests-kerberos==0.14.0
requests==2.31.0
retry==0.9.2
s3transfer==0.10.1
setuptools==59.6.0
simplejson==3.19.2
six==1.16.0
soupsieve==2.5
texttable==1.7.0
tomli==2.0.1
typing_extensions==4.11.0
tzlocal==2.1
unattended-upgrades==0.1
urllib3==2.0.7
wadllib==1.3.6
websocket-client==0.59.0
wheel==0.37.1
zipp==1.0.0
@ -1,3 +1,4 @@
# docker build -t clickhouse/libfuzzer .
ARG FROM_TAG=latest
FROM clickhouse/test-base:$FROM_TAG

@ -29,7 +30,8 @@ RUN apt-get update \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install Jinja2
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt

COPY * /

27
docker/test/libfuzzer/requirements.txt
Normal file
@ -0,0 +1,27 @@
blinker==1.4
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
Jinja2==3.1.4
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
MarkupSafe==2.1.5
more-itertools==8.10.0
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0
@ -23,7 +23,6 @@ RUN apt-get update \
    python3 \
    python3-dev \
    python3-pip \
    python3-setuptools \
    rsync \
    tree \
    tzdata \
@ -33,12 +32,14 @@ RUN apt-get update \
    cargo \
    ripgrep \
    zstd \
    && pip3 --no-cache-dir install 'clickhouse-driver==0.2.1' scipy \
    && apt-get purge --yes python3-dev g++ \
    && apt-get autoremove --yes \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

COPY requirements.txt /
RUN pip3 --no-cache-dir install -r requirements.txt

COPY run.sh /

CMD ["bash", "/run.sh"]
32
docker/test/performance-comparison/requirements.txt
Normal file
@ -0,0 +1,32 @@
blinker==1.4
clickhouse-driver==0.2.7
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
more-itertools==8.10.0
numpy==1.26.3
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
Pygments==2.11.2
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
pytz==2023.4
PyYAML==6.0.1
scipy==1.12.0
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
tzlocal==2.1
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0
@ -18,11 +18,8 @@ RUN apt-get update --yes \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install \
    numpy \
    pyodbc \
    deepdiff \
    sqlglot
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt

ARG odbc_driver_url="https://github.com/ClickHouse/clickhouse-odbc/releases/download/v1.1.6.20200320/clickhouse-odbc-1.1.6-Linux.tar.gz"

30
docker/test/sqllogic/requirements.txt
Normal file
@ -0,0 +1,30 @@
blinker==1.4
cryptography==3.4.8
dbus-python==1.2.18
deepdiff==7.0.1
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
more-itertools==8.10.0
numpy==1.26.4
oauthlib==3.2.0
ordered-set==4.1.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyodbc==5.1.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
sqlglot==23.16.0
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0
@ -14,9 +14,8 @@ RUN apt-get update --yes \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

RUN pip3 install \
    pyyaml \
    clickhouse-driver
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt

ARG sqltest_repo="https://github.com/elliotchance/sqltest/"

29
docker/test/sqltest/requirements.txt
Normal file
@ -0,0 +1,29 @@
blinker==1.4
clickhouse-driver==0.2.7
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
httplib2==0.20.2
importlib-metadata==4.6.4
jeepney==0.7.1
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
more-itertools==8.10.0
oauthlib==3.2.0
packaging==24.1
pip==24.1.1
pipdeptree==2.23.0
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
pytz==2024.1
PyYAML==6.0.1
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
tzlocal==5.2
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0
@ -6,7 +6,6 @@ FROM clickhouse/stateless-test:$FROM_TAG
RUN apt-get update -y \
    && env DEBIAN_FRONTEND=noninteractive \
        apt-get install --yes --no-install-recommends \
            python3-requests \
            nodejs \
            npm \
    && apt-get clean \
@ -25,10 +25,7 @@ RUN apt-get update -y \
    openssl \
    postgresql-client \
    python3 \
    python3-lxml \
    python3-pip \
    python3-requests \
    python3-termcolor \
    qemu-user-static \
    sqlite3 \
    sudo \
@ -51,7 +48,8 @@ RUN curl -OL https://github.com/protocolbuffers/protobuf/releases/download/v${PR
    && unzip protoc-${PROTOC_VERSION}-linux-x86_64.zip -d /usr/local \
    && rm protoc-${PROTOC_VERSION}-linux-x86_64.zip

RUN pip3 install numpy==1.26.3 scipy==1.12.0 pandas==1.5.3 Jinja2==3.1.3 pyarrow==15.0.0
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r /requirements.txt

RUN mkdir -p /tmp/clickhouse-odbc-tmp \
    && cd /tmp/clickhouse-odbc-tmp \
51
docker/test/stateless/requirements.txt
Normal file
@ -0,0 +1,51 @@
awscli==1.22.34
blinker==1.4
botocore==1.23.34
certifi==2020.6.20
chardet==4.0.0
colorama==0.4.4
cryptography==3.4.8
dbus-python==1.2.18
distro==1.7.0
docutils==0.17.1
gyp==0.1
httplib2==0.20.2
idna==3.3
importlib-metadata==4.6.4
jeepney==0.7.1
Jinja2==3.1.3
jmespath==0.10.0
keyring==23.5.0
launchpadlib==1.10.16
lazr.restfulclient==0.14.4
lazr.uri==1.0.6
lxml==4.8.0
MarkupSafe==2.1.5
more-itertools==8.10.0
numpy==1.26.3
oauthlib==3.2.0
packaging==24.1
pandas==1.5.3
pip==24.1.1
pipdeptree==2.23.0
pyarrow==15.0.0
pyasn1==0.4.8
PyJWT==2.3.0
pyparsing==2.4.7
python-apt==2.4.0+ubuntu3
python-dateutil==2.8.1
pytz==2024.1
PyYAML==6.0.1
requests==2.32.3
roman==3.3
rsa==4.8
s3transfer==0.5.0
scipy==1.12.0
SecretStorage==3.3.1
setuptools==59.6.0
six==1.16.0
termcolor==1.1.0
urllib3==1.26.5
wadllib==1.3.6
wheel==0.37.1
zipp==1.0.0
@ -6,6 +6,9 @@ source /setup_export_logs.sh
# fail on errors, verbose and export all env variables
set -e -x -a

MAX_RUN_TIME=${MAX_RUN_TIME:-10800}
MAX_RUN_TIME=$((MAX_RUN_TIME == 0 ? 10800 : MAX_RUN_TIME))

# Choose random timezone for this test run.
#
# NOTE: that clickhouse-test will randomize session_timezone by itself as well
@ -262,14 +265,17 @@ function run_tests()

export -f run_tests


# This should be enough to set up the job and collect artifacts
TIMEOUT=$((MAX_RUN_TIME - 300))
if [ "$NUM_TRIES" -gt "1" ]; then
    # We don't run tests with Ordinary database in PRs, only in master.
    # So run new/changed tests with Ordinary at least once in flaky check.
    timeout_with_logging "$MAX_RUN_TIME" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
    timeout_with_logging "$TIMEOUT" bash -c 'NUM_TRIES=1; USE_DATABASE_ORDINARY=1; run_tests' \
        | sed 's/All tests have finished//' | sed 's/No tests were run//' ||:
fi

timeout_with_logging "$MAX_RUN_TIME" bash -c run_tests ||:
timeout_with_logging "$TIMEOUT" bash -c run_tests ||:

echo "Files in current directory"
ls -la ./
@ -38,7 +38,7 @@ function fn_exists() {
function timeout_with_logging() {
    local exit_code=0

    timeout "${@}" || exit_code="${?}"
    timeout -s TERM --preserve-status "${@}" || exit_code="${?}"

    if [[ "${exit_code}" -eq "124" ]]
    then
@ -23,22 +23,8 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# python-magic is the same version as in Ubuntu 22.04
RUN pip3 install \
    PyGithub \
    black==23.12.0 \
    boto3 \
    codespell==2.2.1 \
    mypy==1.8.0 \
    pylint==3.1.0 \
    python-magic==0.4.24 \
    flake8==4.0.1 \
    requests \
    thefuzz \
    tqdm==4.66.4 \
    types-requests \
    unidiff \
    jwt \
    && rm -rf /root/.cache/pip
COPY requirements.txt /
RUN pip3 install --no-cache-dir -r requirements.txt

RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
ENV LC_ALL en_US.UTF-8
58
docker/test/style/requirements.txt
Normal file
@ -0,0 +1,58 @@
aiohttp==3.9.5
aiosignal==1.3.1
astroid==3.1.0
async-timeout==4.0.3
attrs==23.2.0
black==23.12.0
boto3==1.34.131
botocore==1.34.131
certifi==2024.6.2
cffi==1.16.0
charset-normalizer==3.3.2
click==8.1.7
codespell==2.2.1
cryptography==42.0.8
Deprecated==1.2.14
dill==0.3.8
flake8==4.0.1
frozenlist==1.4.1
idna==3.7
isort==5.13.2
jmespath==1.0.1
jwt==1.3.1
mccabe==0.6.1
multidict==6.0.5
mypy==1.8.0
mypy-extensions==1.0.0
packaging==24.1
pathspec==0.9.0
pip==24.1.1
pipdeptree==2.23.0
platformdirs==4.2.2
pycodestyle==2.8.0
pycparser==2.22
pyflakes==2.4.0
PyGithub==2.3.0
PyJWT==2.8.0
pylint==3.1.0
PyNaCl==1.5.0
python-dateutil==2.9.0.post0
python-magic==0.4.24
PyYAML==6.0.1
rapidfuzz==3.9.3
requests==2.32.3
s3transfer==0.10.1
setuptools==59.6.0
six==1.16.0
thefuzz==0.22.1
tomli==2.0.1
tomlkit==0.12.5
tqdm==4.66.4
types-requests==2.32.0.20240622
typing_extensions==4.12.2
unidiff==0.7.5
urllib3==2.2.2
wheel==0.37.1
wrapt==1.16.0
yamllint==1.26.3
yarl==1.9.4
26
docs/changelogs/v24.6.2.17-stable.md
Normal file
@ -0,0 +1,26 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.6.2.17-stable (5710a8b5c0c) FIXME as compared to v24.6.1.4423-stable (dcced7c8478)

#### New Feature
* Backported in [#66002](https://github.com/ClickHouse/ClickHouse/issues/66002): Add AzureQueue storage. [#65458](https://github.com/ClickHouse/ClickHouse/pull/65458) ([Kseniia Sumarokova](https://github.com/kssenii)).

#### Improvement
* Backported in [#65898](https://github.com/ClickHouse/ClickHouse/issues/65898): Respect cgroup CPU limit in Keeper. [#65819](https://github.com/ClickHouse/ClickHouse/pull/65819) ([Antonio Andelic](https://github.com/antonio2368)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65935](https://github.com/ClickHouse/ClickHouse/issues/65935): For queries that read from `PostgreSQL`, cancel the internal `PostgreSQL` query if the ClickHouse query is finished. Otherwise, the `ClickHouse` query cannot be canceled until the internal `PostgreSQL` query is finished. [#65771](https://github.com/ClickHouse/ClickHouse/pull/65771) ([Maksim Kita](https://github.com/kitaisreal)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#65907](https://github.com/ClickHouse/ClickHouse/issues/65907): Fix bug with session closing in Keeper. [#65735](https://github.com/ClickHouse/ClickHouse/pull/65735) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#65962](https://github.com/ClickHouse/ClickHouse/issues/65962): Add missing workload identity changes. [#65848](https://github.com/ClickHouse/ClickHouse/pull/65848) ([SmitaRKulkarni](https://github.com/SmitaRKulkarni)).
* Backported in [#66033](https://github.com/ClickHouse/ClickHouse/issues/66033): Follow up to [#65046](https://github.com/ClickHouse/ClickHouse/issues/65046). [#65928](https://github.com/ClickHouse/ClickHouse/pull/65928) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#66076](https://github.com/ClickHouse/ClickHouse/issues/66076): Fix support of non-const scale arguments in rounding functions. [#65983](https://github.com/ClickHouse/ClickHouse/pull/65983) ([Mikhail Gorshkov](https://github.com/mgorshkov)).
* Backported in [#66017](https://github.com/ClickHouse/ClickHouse/issues/66017): Fix race in s3queue. [#65986](https://github.com/ClickHouse/ClickHouse/pull/65986) ([Kseniia Sumarokova](https://github.com/kssenii)).
@ -84,6 +84,7 @@ The BACKUP and RESTORE statements take a list of DATABASE and TABLE names, a des
- [`compression_method`](/docs/en/sql-reference/statements/create/table.md/#column-compression-codecs) and compression_level
- `password` for the file on disk
- `base_backup`: the destination of the previous backup of this source. For example, `Disk('backups', '1.zip')`
- `use_same_s3_credentials_for_base_backup`: whether the base backup to S3 should inherit credentials from the query. Only works with `S3`.
- `structure_only`: if enabled, allows backing up or restoring only the CREATE statements, without the data of tables
- `storage_policy`: storage policy for the tables being restored. See [Using Multiple Block Devices for Data Storage](../engines/table-engines/mergetree-family/mergetree.md#table_engine-mergetree-multiple-volumes). This setting is only applicable to the `RESTORE` command. The specified storage policy applies only to tables with an engine from the `MergeTree` family.
- `s3_storage_class`: the storage class used for S3 backup. For example, `STANDARD`
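
As a rough sketch of how the new `storage_policy` setting combines with `s3_storage_class` in practice (the table name, endpoint, credentials, and policy name below are illustrative placeholders, not taken from this change):

```sql
-- Back up to S3 with an explicit storage class (endpoint and keys are placeholders).
BACKUP TABLE data.events
    TO S3('https://bucket.s3.amazonaws.com/backups/1', 'access_key', 'secret_key')
    SETTINGS s3_storage_class = 'STANDARD';

-- Restore the same table onto a specific storage policy (MergeTree engines only).
RESTORE TABLE data.events
    FROM S3('https://bucket.s3.amazonaws.com/backups/1', 'access_key', 'secret_key')
    SETTINGS storage_policy = 'policy_with_local_and_s3';
```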
@ -974,6 +974,13 @@ Default value: false

- [exclude_deleted_rows_for_part_size_in_merge](#exclude_deleted_rows_for_part_size_in_merge) setting

## use_compact_variant_discriminators_serialization {#use_compact_variant_discriminators_serialization}

Enables compact mode for binary serialization of discriminators in the Variant data type.
This mode allows using significantly less memory for storing discriminators in parts when there is mostly one variant or many NULL values.

Default value: true
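
For illustration, a minimal sketch of overriding this setting per table; the table name and schema are invented, and on versions where Variant is still experimental, `allow_experimental_variant_type = 1` may also be required:

```sql
-- Disable compact discriminator serialization for one table (illustrative schema).
CREATE TABLE variant_demo
(
    v Variant(UInt64, String)
)
ENGINE = MergeTree
ORDER BY tuple()
SETTINGS use_compact_variant_discriminators_serialization = 0;
```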

## merge_workload

Used to regulate how resources are utilized and shared between merges and other workloads. The specified value is used as the `workload` setting value for background merges of this table. If not specified (empty string), the server setting `merge_workload` is used instead.
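
Since `merge_workload` is a MergeTree-level table setting, it should be adjustable on an existing table as well; a hedged sketch (the table and workload names are made up):

```sql
-- Route background merges of this table into a dedicated workload.
ALTER TABLE events MODIFY SETTING merge_workload = 'merges_low_priority';
```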
30
docs/en/operations/startup-scripts.md
Normal file
@ -0,0 +1,30 @@
---
slug: /en/operations/startup-scripts
sidebar_label: Startup Scripts
---

# Startup Scripts

ClickHouse can run arbitrary SQL queries from the server configuration during startup. This can be useful for migrations or automatic schema creation.

```xml
<clickhouse>
    <startup_scripts>
        <scripts>
            <query>CREATE ROLE OR REPLACE test_role</query>
        </scripts>
        <scripts>
            <query>CREATE TABLE TestTable (id UInt64) ENGINE=TinyLog</query>
            <condition>SELECT 1;</condition>
        </scripts>
    </startup_scripts>
</clickhouse>
```

ClickHouse executes all queries from the `startup_scripts` sequentially in the specified order. If any of the queries fail, the execution of the following queries won't be interrupted.

You can specify a conditional query in the config. In that case, the corresponding query executes only when the condition query returns the value `1` or `true`.

:::note
If the condition query returns any value other than `1` or `true`, the result will be interpreted as `false`, and the corresponding query won't be executed.
:::
@ -4,35 +4,56 @@ sidebar_position: 59
sidebar_label: clickhouse-disks
---

# clickhouse-disks
# Clickhouse-disks

A utility providing filesystem-like operations for ClickHouse disks.
A utility providing filesystem-like operations for ClickHouse disks. It can work in both interactive and non-interactive modes.

Program-wide options:
## Program-wide options

* `--config-file, -C` -- path to ClickHouse config, defaults to `/etc/clickhouse-server/config.xml`.
* `--save-logs` -- Log progress of invoked commands to `/var/log/clickhouse-server/clickhouse-disks.log`.
* `--log-level` -- What [type](../server-configuration-parameters/settings#server_configuration_parameters-logger) of events to log, defaults to `none`.
* `--disk` -- what disk to use for `mkdir, move, read, write, remove` commands. Defaults to `default`.
* `--query, -q` -- single query that can be executed without launching interactive mode
* `--help, -h` -- print all the options and commands with their descriptions

## Default Disks
After launch, two disks are initialized. The first is the `local` disk, which imitates the local filesystem from which the clickhouse-disks utility was launched. The second is the `default` disk, which is mounted to the local filesystem in the directory given by the config parameter `clickhouse/path` (the default value is `/var/lib/clickhouse`).

## Clickhouse-disks state
For each disk that was added, the utility stores the current directory (as in a usual filesystem). The user can change the current directory and switch between disks.

State is reflected in the prompt "`disk_name`:`path_name`"

## Commands

* `copy [--disk-from d1] [--disk-to d2] <FROM_PATH> <TO_PATH>`.
  Recursively copy data from `FROM_PATH` at disk `d1` (defaults to `disk` value if not provided)
  to `TO_PATH` at disk `d2` (defaults to `disk` value if not provided).
* `move <FROM_PATH> <TO_PATH>`.
  Move file or directory from `FROM_PATH` to `TO_PATH`.
* `remove <PATH>`.
  Remove `PATH` recursively.
* `link <FROM_PATH> <TO_PATH>`.
  Create a hardlink from `FROM_PATH` to `TO_PATH`.
* `list [--recursive] <PATH>...`
  List files at `PATH`s. Non-recursive by default.
* `list-disks`.
In this documentation, all mandatory positional arguments are referred to as `<parameter>` and named arguments as `[--parameter value]`. All positional parameters can also be passed as named parameters with the corresponding name.

* `cd (change-dir, change_dir) [--disk disk] <path>`
  Change directory to path `path` on disk `disk` (default value is the current disk). No disk switching happens.
* `copy (cp) [--disk-from disk_1] [--disk-to disk_2] <path-from> <path-to>`.
  Recursively copy data from `path-from` at disk `disk_1` (default value is the current disk (parameter `disk` in non-interactive mode))
  to `path-to` at disk `disk_2` (default value is the current disk (parameter `disk` in non-interactive mode)).
* `current_disk_with_path (current, current_disk, current_path)`
  Print the current state in the format:
  `Disk: "current_disk" Path: "current path on current disk"`
* `help [<command>]`
  Print a help message about command `command`. If `command` is not specified, print information about all commands.
* `move (mv) <path-from> <path-to>`.
  Move file or directory from `path-from` to `path-to` within the current disk.
* `remove (rm, delete) <path>`.
  Remove `path` recursively on the current disk.
* `link (ln) <path-from> <path-to>`.
  Create a hardlink from `path-from` to `path-to` on the current disk.
* `list (ls) [--recursive] <path>`
  List files at `path`s on the current disk. Non-recursive by default.
* `list-disks (list_disks, ls-disks, ls_disks)`.
  List disk names.
* `mkdir [--recursive] <PATH>`.
* `mkdir [--recursive] <path>` on the current disk.
  Create a directory. Non-recursive by default.
* `read: <FROM_PATH> [<TO_PATH>]`
  Read a file from `FROM_PATH` to `TO_PATH` (`stdout` if not supplied).
* `write [FROM_PATH] <TO_PATH>`.
  Write a file from `FROM_PATH` (`stdin` if not supplied) to `TO_PATH`.
* `read (r) <path-from> [--path-to path]`
  Read a file from `path-from` to `path` (`stdout` if not supplied).
* `switch-disk [--path path] <disk>`
  Switch to disk `disk` at path `path` (if `path` is not specified, the default value is the previous path on disk `disk`).
* `write (w) [--path-from path] <path-to>`.
  Write a file from `path` (`stdin` if `path` is not supplied; input must finish with Ctrl+D) to `path-to`.
@ -0,0 +1,37 @@
---
slug: /en/sql-reference/aggregate-functions/reference/aggthrow
sidebar_position: 101
---

# aggThrow

This function can be used for testing exception safety. It will throw an exception on creation with the specified probability.

**Syntax**

```sql
aggThrow(throw_prob)
```

**Arguments**

- `throw_prob` — Probability to throw on creation. [Float64](../../data-types/float.md).

**Returned value**

- An exception: `Code: 503. DB::Exception: Aggregate function aggThrow has thrown exception successfully`.

**Example**

Query:

```sql
SELECT number % 2 AS even, aggThrow(number) FROM numbers(10) GROUP BY even;
```

Result:

```response
Received exception:
Code: 503. DB::Exception: Aggregate function aggThrow has thrown exception successfully: While executing AggregatingTransform. (AGGREGATE_FUNCTION_THROW)
```
@ -43,6 +43,7 @@ Standard aggregate functions:

ClickHouse-specific aggregate functions:

- [aggThrow](../reference/aggthrow.md)
- [analysisOfVariance](../reference/analysis_of_variance.md)
- [any](../reference/any_respect_nulls.md)
- [anyHeavy](../reference/anyheavy.md)
@ -83,7 +83,57 @@ Result:
```
## makeDate32

Like [makeDate](#makedate) but produces a [Date32](../data-types/date32.md).
Creates a date of type [Date32](../../sql-reference/data-types/date32.md) from a year, month, and day (or, optionally, a year and a day).

**Syntax**

```sql
makeDate32(year, [month,] day)
```

**Arguments**

- `year` — Year. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `month` — Month (optional). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `day` — Day. [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).

:::note
If `month` is omitted then `day` should take a value between `1` and `365`, otherwise it should take a value between `1` and `31`.
:::

**Returned values**

- A date created from the arguments. [Date32](../../sql-reference/data-types/date32.md).

**Examples**

Create a date from a year, month, and day:

Query:

```sql
SELECT makeDate32(2024, 1, 1);
```

Result:

```response
2024-01-01
```

Create a Date from a year and day of year:

Query:

``` sql
SELECT makeDate32(2024, 100);
```

Result:

```response
2024-04-09
```

## makeDateTime

@ -125,12 +175,38 @@ Result:

## makeDateTime64

Like [makeDateTime](#makedatetime) but produces a [DateTime64](../data-types/datetime64.md).
Creates a [DateTime64](../../sql-reference/data-types/datetime64.md) data type value from its components: year, month, day, hour, minute, second, with optional sub-second precision.

**Syntax**

```sql
makeDateTime64(year, month, day, hour, minute, second[, fraction[, precision]])
```

**Arguments**

- `year` — Year (0-9999). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `month` — Month (1-12). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `day` — Day (1-31). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `hour` — Hour (0-23). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `minute` — Minute (0-59). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `second` — Second (0-59). [Integer](../../sql-reference/data-types/int-uint.md), [Float](../../sql-reference/data-types/float.md) or [Decimal](../../sql-reference/data-types/decimal.md).
- `fraction` — Optional sub-second fraction, interpreted at the scale given by `precision`. [Integer](../../sql-reference/data-types/int-uint.md).
- `precision` — Optional precision of the sub-second component (0-9). [Integer](../../sql-reference/data-types/int-uint.md).

**Returned value**

- A date and time created from the supplied arguments. [DateTime64](../../sql-reference/data-types/datetime64.md).

**Example**

``` sql
makeDateTime64(year, month, day, hour, minute, second[, fraction[, precision[, timezone]]])
SELECT makeDateTime64(2023, 5, 15, 10, 30, 45, 779, 5);
```

```response
┌─makeDateTime64(2023, 5, 15, 10, 30, 45, 779, 5)─┐
│                       2023-05-15 10:30:45.00779 │
└─────────────────────────────────────────────────┘
```

## timestamp

@ -86,7 +86,7 @@ Returns the fully qualified domain name of the ClickHouse server.
fqdn();
```

This function is case-insensitive.
Aliases: `fullHostName`, `FQDN`.

**Returned value**

@ -6,41 +6,119 @@ sidebar_label: Time Window

# Time Window Functions

Time window functions return the inclusive lower and exclusive upper bound of the corresponding window. The functions for working with WindowView are listed below:
Time window functions return the inclusive lower and exclusive upper bound of the corresponding window. The functions for working with [WindowView](../statements/create/view.md/#window-view-experimental) are listed below:

## tumble

A tumbling time window assigns records to non-overlapping, continuous windows with a fixed duration (`interval`).

**Syntax**

``` sql
tumble(time_attr, interval [, timezone])
```

**Arguments**
- `time_attr` - Date and time. [DateTime](../data-types/datetime.md) data type.
- `interval` - Window interval in [Interval](../data-types/special-data-types/interval.md) data type.
- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `interval` — Window interval in [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).

**Returned values**

- The inclusive lower and exclusive upper bound of the corresponding tumbling window. [Tuple](../data-types/tuple.md)([DateTime](../data-types/datetime.md), [DateTime](../data-types/datetime.md))`.
- The inclusive lower and exclusive upper bound of the corresponding tumbling window. [Tuple](../data-types/tuple.md)([DateTime](../data-types/datetime.md), [DateTime](../data-types/datetime.md)).

**Example**

Query:

``` sql
SELECT tumble(now(), toIntervalDay('1'))
SELECT tumble(now(), toIntervalDay('1'));
```

Result:

``` text
┌─tumble(now(), toIntervalDay('1'))─────────────┐
│ ['2020-01-01 00:00:00','2020-01-02 00:00:00'] │
│ ('2024-07-04 00:00:00','2024-07-05 00:00:00') │
└───────────────────────────────────────────────┘
```

## tumbleStart

Returns the inclusive lower bound of the corresponding [tumbling window](#tumble).

**Syntax**

``` sql
tumbleStart(time_attr, interval [, timezone]);
```

**Arguments**

- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `interval` — Window interval in [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).

The parameters above can also be passed to the function as a [tuple](../data-types/tuple.md).

**Returned values**

- The inclusive lower bound of the corresponding tumbling window. [DateTime](../data-types/datetime.md), [Tuple](../data-types/tuple.md) or [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT tumbleStart(now(), toIntervalDay('1'));
```

Result:

```response
┌─tumbleStart(now(), toIntervalDay('1'))─┐
│                    2024-07-04 00:00:00 │
└────────────────────────────────────────┘
```
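
The old documentation's `tumbleStart(bounds_tuple)` form still applies: the tuple returned by `tumble` can be passed directly. A small sketch (the result depends on the current date):

```sql
-- Equivalent to tumbleStart(now(), toIntervalDay('1')).
SELECT tumbleStart(tumble(now(), toIntervalDay('1')));
```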

## tumbleEnd

Returns the exclusive upper bound of the corresponding [tumbling window](#tumble).

**Syntax**

``` sql
tumbleEnd(time_attr, interval [, timezone]);
```

**Arguments**

- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `interval` — Window interval in [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).

The parameters above can also be passed to the function as a [tuple](../data-types/tuple.md).

**Returned values**

- The exclusive upper bound of the corresponding tumbling window. [DateTime](../data-types/datetime.md), [Tuple](../data-types/tuple.md) or [UInt32](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT tumbleEnd(now(), toIntervalDay('1'));
```

Result:

```response
┌─tumbleEnd(now(), toIntervalDay('1'))─┐
│                  2024-07-05 00:00:00 │
└──────────────────────────────────────┘
```

## hop

A hopping time window has a fixed duration (`window_interval`) and hops by a specified hop interval (`hop_interval`). If the `hop_interval` is smaller than the `window_interval`, hopping windows are overlapping. Thus, records can be assigned to multiple windows.
@ -51,65 +129,118 @@ hop(time_attr, hop_interval, window_interval [, timezone])

**Arguments**

- `time_attr` - Date and time. [DateTime](../data-types/datetime.md) data type.
- `hop_interval` - Hop interval in [Interval](../data-types/special-data-types/interval.md) data type. Should be a positive number.
- `window_interval` - Window interval in [Interval](../data-types/special-data-types/interval.md) data type. Should be a positive number.
- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `hop_interval` — Positive hop interval. [Interval](../data-types/special-data-types/interval.md).
- `window_interval` — Positive window interval. [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).

**Returned values**

- The inclusive lower and exclusive upper bound of the corresponding hopping window. Since one record can be assigned to multiple hop windows, the function only returns the bound of the **first** window when hop function is used **without** `WINDOW VIEW`. [Tuple](../data-types/tuple.md)([DateTime](../data-types/datetime.md), [DateTime](../data-types/datetime.md))`.
- The inclusive lower and exclusive upper bound of the corresponding hopping window. [Tuple](../data-types/tuple.md)([DateTime](../data-types/datetime.md), [DateTime](../data-types/datetime.md)).

:::note
Since one record can be assigned to multiple hop windows, the function only returns the bound of the **first** window when the hop function is used **without** `WINDOW VIEW`.
:::

**Example**

Query:

``` sql
SELECT hop(now(), INTERVAL '1' SECOND, INTERVAL '2' SECOND)
SELECT hop(now(), INTERVAL '1' DAY, INTERVAL '2' DAY);
```

Result:

``` text
┌─hop(now(), toIntervalSecond('1'), toIntervalSecond('2'))──┐
│ ('2020-01-14 16:58:22','2020-01-14 16:58:24')             │
└───────────────────────────────────────────────────────────┘
```

## tumbleStart

Returns the inclusive lower bound of the corresponding tumbling window.

``` sql
tumbleStart(bounds_tuple);
tumbleStart(time_attr, interval [, timezone]);
```

## tumbleEnd

Returns the exclusive upper bound of the corresponding tumbling window.

``` sql
tumbleEnd(bounds_tuple);
tumbleEnd(time_attr, interval [, timezone]);
┌─hop(now(), toIntervalDay('1'), toIntervalDay('2'))─┐
│ ('2024-07-03 00:00:00','2024-07-05 00:00:00')      │
└────────────────────────────────────────────────────┘
```

## hopStart

Returns the inclusive lower bound of the corresponding hopping window.
Returns the inclusive lower bound of the corresponding [hopping window](#hop).

**Syntax**

``` sql
hopStart(bounds_tuple);
hopStart(time_attr, hop_interval, window_interval [, timezone]);
```
**Arguments**

- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `hop_interval` — Positive hop interval. [Interval](../data-types/special-data-types/interval.md).
- `window_interval` — Positive window interval. [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).

The parameters above can also be passed to the function as a [tuple](../data-types/tuple.md).

**Returned values**

- The inclusive lower bound of the corresponding hopping window. [DateTime](../data-types/datetime.md), [Tuple](../data-types/tuple.md) or [UInt32](../data-types/int-uint.md).

:::note
Since one record can be assigned to multiple hop windows, the function only returns the bound of the **first** window when the hop function is used **without** `WINDOW VIEW`.
:::

**Example**

Query:

``` sql
SELECT hopStart(now(), INTERVAL '1' DAY, INTERVAL '2' DAY);
```

Result:

``` text
┌─hopStart(now(), toIntervalDay('1'), toIntervalDay('2'))─┐
│                                     2024-07-03 00:00:00 │
└─────────────────────────────────────────────────────────┘
```

## hopEnd

Returns the exclusive upper bound of the corresponding hopping window.
Returns the exclusive upper bound of the corresponding [hopping window](#hop).

**Syntax**

``` sql
hopEnd(bounds_tuple);
hopEnd(time_attr, hop_interval, window_interval [, timezone]);
```
**Arguments**

- `time_attr` — Date and time. [DateTime](../data-types/datetime.md).
- `hop_interval` — Positive hop interval. [Interval](../data-types/special-data-types/interval.md).
- `window_interval` — Positive window interval. [Interval](../data-types/special-data-types/interval.md).
- `timezone` — [Timezone name](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-timezone) (optional).

The parameters above can also be passed to the function as a [tuple](../data-types/tuple.md).

**Returned values**

- The exclusive upper bound of the corresponding hopping window. [DateTime](../data-types/datetime.md), [Tuple](../data-types/tuple.md) or [UInt32](../data-types/int-uint.md).

:::note
Since one record can be assigned to multiple hop windows, the function only returns the bound of the **first** window when the hop function is used **without** `WINDOW VIEW`.
:::

**Example**

Query:

``` sql
SELECT hopEnd(now(), INTERVAL '1' DAY, INTERVAL '2' DAY);
```

Result:

``` text
┌─hopEnd(now(), toIntervalDay('1'), toIntervalDay('2'))─┐
│                                   2024-07-05 00:00:00 │
└───────────────────────────────────────────────────────┘
```

## Related content

@ -600,7 +600,7 @@ mapApply(func, map)

**Arguments**

- `func` - [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `func` — [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `map` — [Map](../data-types/map.md).

**Returned value**
@ -833,6 +833,38 @@ SELECT mapSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map;

For more details see the [reference](../../sql-reference/functions/array-functions.md#array_functions-sort) for the `arraySort` function.

## mapPartialSort

Sorts the elements of a map in ascending order, with an additional `limit` argument that allows partial sorting.
If the `func` function is specified, the sorting order is determined by the result of the `func` function applied to the keys and values of the map.

**Syntax**

```sql
mapPartialSort([func,] limit, map)
```
**Arguments**

- `func` – Optional function to apply to the keys and values of the map. [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `limit` – Elements in range [1..limit] are sorted. [(U)Int](../data-types/int-uint.md).
- `map` – Map to sort. [Map](../data-types/map.md).

**Returned value**

- Partially sorted map. [Map](../data-types/map.md).

**Example**

``` sql
SELECT mapPartialSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2));
```

``` text
┌─mapPartialSort(lambda(tuple(k, v), v), 2, map('k1', 3, 'k2', 1, 'k3', 2))─┐
│ {'k2':1,'k3':2,'k1':3}                                                    │
└───────────────────────────────────────────────────────────────────────────┘
```

## mapReverseSort(\[func,\], map)

Sorts the elements of a map in descending order.
@ -861,3 +893,35 @@ SELECT mapReverseSort((k, v) -> v, map('key2', 2, 'key3', 1, 'key1', 3)) AS map;
```

For more details see the function [arrayReverseSort](../../sql-reference/functions/array-functions.md#array_functions-reverse-sort).

## mapPartialReverseSort

Sorts the elements of a map in descending order, with an additional `limit` argument that allows partial sorting.
If the `func` function is specified, the sorting order is determined by the result of the `func` function applied to the keys and values of the map.

**Syntax**

```sql
mapPartialReverseSort([func,] limit, map)
```
**Arguments**

- `func` – Optional function to apply to the keys and values of the map. [Lambda function](../../sql-reference/functions/index.md#higher-order-functions---operator-and-lambdaparams-expr-function).
- `limit` – Elements in range [1..limit] are sorted. [(U)Int](../data-types/int-uint.md).
- `map` – Map to sort. [Map](../data-types/map.md).

**Returned value**

- Partially sorted map. [Map](../data-types/map.md).

**Example**

``` sql
SELECT mapPartialReverseSort((k, v) -> v, 2, map('k1', 3, 'k2', 1, 'k3', 2));
```

``` text
┌─mapPartialReverseSort(lambda(tuple(k, v), v), 2, map('k1', 3, 'k2', 1, 'k3', 2))─┐
│ {'k1':3,'k3':2,'k2':1}                                                           │
└──────────────────────────────────────────────────────────────────────────────────┘
```
36
docs/en/sql-reference/table-functions/fuzzQuery.md
Normal file
@ -0,0 +1,36 @@
---
slug: /en/sql-reference/table-functions/fuzzQuery
sidebar_position: 75
sidebar_label: fuzzQuery
---

# fuzzQuery

Perturbs the given query string with random variations.

``` sql
fuzzQuery(query[, max_query_length[, random_seed]])
```

**Arguments**

- `query` (String) - The source query to perform the fuzzing on.
- `max_query_length` (UInt64) - The maximum length the query can reach during the fuzzing process.
- `random_seed` (UInt64) - A random seed for producing stable results.

**Returned Value**

A table object with a single column containing perturbed query strings.

## Usage Example

``` sql
SELECT * FROM fuzzQuery('SELECT materialize(\'a\' AS key) GROUP BY key') LIMIT 2;
```

```
┌─query──────────────────────────────────────────────────────────┐
1. │ SELECT 'a' AS key GROUP BY key                                 │
2. │ EXPLAIN PIPELINE compact = true SELECT 'a' AS key GROUP BY key │
└────────────────────────────────────────────────────────────────┘
```
@ -23,6 +23,7 @@ ClickHouse supports the standard grammar for defining windows and window functio
| `GROUPS` frame | ❌ |
| Calculating aggregate functions over a frame (`sum(value) over (order by time)`) | ✅ (All aggregate functions are supported) |
| `rank()`, `dense_rank()`, `row_number()` | ✅ |
| `percent_rank()` | ✅ Efficiently computes the relative standing of a value within a partition in a dataset. This function effectively replaces the more verbose and computationally intensive manual SQL calculation expressed as `ifNull((rank() OVER(PARTITION BY x ORDER BY y) - 1) / nullif(count(1) OVER(PARTITION BY x) - 1, 0), 0)` |
| `lag/lead(value, offset)` | ❌ <br/> You can use one of the following workarounds:<br/> 1) `any(value) over (.... rows between <offset> preceding and <offset> preceding)`, or `following` for `lead` <br/> 2) `lagInFrame/leadInFrame`, which are analogous, but respect the window frame. To get behavior identical to `lag/lead`, use `rows between unbounded preceding and unbounded following` |
| `ntile(buckets)` | ✅ <br/> Specify a window like `(partition by x order by y rows between unbounded preceding and unbounded following)`. |

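To make the `percent_rank()` row above concrete, here is a hedged sketch comparing the built-in function with the manual expression it replaces, using synthetic data from `numbers`; both result columns should agree:

```sql
-- percent_rank() vs. the manual rank()-based formula from the table above.
SELECT
    number AS y,
    percent_rank() OVER (ORDER BY number) AS pr,
    ifNull((rank() OVER (ORDER BY number) - 1) / nullIf(count(1) OVER () - 1, 0), 0) AS manual_pr
FROM numbers(5);
```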
@ -9,7 +9,10 @@ namespace DB
class Client : public ClientBase
{
public:
    Client() = default;
    Client()
    {
        fuzzer = QueryFuzzer(randomSeed(), &std::cout, &std::cerr);
    }

    void initialize(Poco::Util::Application & self) override;

@ -1,6 +1,8 @@
set (CLICKHOUSE_DISKS_SOURCES
    DisksApp.cpp
    DisksClient.cpp
    ICommand.cpp
    CommandChangeDirectory.cpp
    CommandCopy.cpp
    CommandLink.cpp
    CommandList.cpp
@ -9,10 +11,14 @@ set (CLICKHOUSE_DISKS_SOURCES
    CommandMove.cpp
    CommandRead.cpp
    CommandRemove.cpp
    CommandWrite.cpp)
    CommandSwitchDisk.cpp
    CommandWrite.cpp
    CommandHelp.cpp
    CommandTouch.cpp
    CommandGetCurrentDiskAndPath.cpp)

if (CLICKHOUSE_CLOUD)
    set (CLICKHOUSE_DISKS_SOURCES ${CLICKHOUSE_DISKS_SOURCES} CommandPackedIO.cpp)
    set (CLICKHOUSE_DISKS_SOURCES ${CLICKHOUSE_DISKS_SOURCES} CommandPackedIO.cpp)
endif ()

set (CLICKHOUSE_DISKS_LINK

35
programs/disks/CommandChangeDirectory.cpp
Normal file
@ -0,0 +1,35 @@
#include <Interpreters/Context.h>
#include <Common/TerminalSize.h>
#include "DisksApp.h"
#include "DisksClient.h"
#include "ICommand.h"

namespace DB
{

class CommandChangeDirectory final : public ICommand
{
public:
    explicit CommandChangeDirectory() : ICommand()
    {
        command_name = "cd";
        description = "Change directory (makes sense only in interactive mode)";
        options_description.add_options()("path", po::value<String>(), "the path to which we want to change (mandatory, positional)")(
            "disk", po::value<String>(), "A disk where the path is changed (without disk switching)");
        positional_options_description.add("path", 1);
    }

    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        DiskWithPath & disk = getDiskWithPath(client, options, "disk");
        String path = getValueFromCommandLineOptionsThrow<String>(options, "path");
        disk.setPath(path);
    }
};

CommandPtr makeCommandChangeDirectory()
{
    return std::make_shared<DB::CommandChangeDirectory>();
}

}
@ -1,6 +1,8 @@
#include "ICommand.h"
#include <Interpreters/Context.h>
#include "Common/Exception.h"
#include <Common/TerminalSize.h>
#include "DisksClient.h"
#include "ICommand.h"

namespace DB
{
@ -10,59 +12,89 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}

class CommandCopy final : public ICommand
{
public:
    CommandCopy()
    explicit CommandCopy() : ICommand()
    {
        command_name = "copy";
        command_option_description.emplace(createOptionsDescription("Allowed options", getTerminalWidth()));
        description = "Recursively copy data from `FROM_PATH` to `TO_PATH`";
        usage = "copy [OPTION]... <FROM_PATH> <TO_PATH>";
        command_option_description->add_options()
            ("disk-from", po::value<String>(), "disk from which we copy")
            ("disk-to", po::value<String>(), "disk to which we copy");
        description = "Recursively copy data from `path-from` to `path-to`";
        options_description.add_options()(
            "disk-from", po::value<String>(), "disk from which the copy is executed (default value is the current disk)")(
            "disk-to", po::value<String>(), "disk to which the copy is executed (default value is the current disk)")(
            "path-from", po::value<String>(), "path from which the copy is executed (mandatory, positional)")(
            "path-to", po::value<String>(), "path to which the copy is executed (mandatory, positional)")(
            "recursive,r", "recursively copy the directory (required to copy a directory)");
        positional_options_description.add("path-from", 1);
        positional_options_description.add("path-to", 1);
    }

    void processOptions(
        Poco::Util::LayeredConfiguration & config,
        po::variables_map & options) const override
    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        if (options.count("disk-from"))
            config.setString("disk-from", options["disk-from"].as<String>());
        if (options.count("disk-to"))
            config.setString("disk-to", options["disk-to"].as<String>());
    }
        auto disk_from = getDiskWithPath(client, options, "disk-from");
        auto disk_to = getDiskWithPath(client, options, "disk-to");
        String path_from = disk_from.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-from"));
        String path_to = disk_to.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-to"));
        bool recursive = options.count("recursive");

    void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> & disk_selector,
        Poco::Util::LayeredConfiguration & config) override
    {
        if (command_arguments.size() != 2)
        if (!disk_from.getDisk()->exists(path_from))
        {
            printHelpMessage();
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
            throw Exception(
                ErrorCodes::BAD_ARGUMENTS,
                "cannot stat '{}' on disk '{}': No such file or directory",
                path_from,
                disk_from.getDisk()->getName());
        }
        else if (disk_from.getDisk()->isFile(path_from))
        {
            auto target_location = getTargetLocation(path_from, disk_to, path_to);
            if (!disk_to.getDisk()->exists(target_location) || disk_to.getDisk()->isFile(target_location))
            {
                disk_from.getDisk()->copyFile(
                    path_from,
                    *disk_to.getDisk(),
                    target_location,
                    /* read_settings= */ {},
                    /* write_settings= */ {},
                    /* cancellation_hook= */ {});
            }
            else
            {
                throw Exception(
                    ErrorCodes::BAD_ARGUMENTS, "cannot overwrite directory {} with non-directory {}", target_location, path_from);
            }
        }
        else if (disk_from.getDisk()->isDirectory(path_from))
        {
            if (!recursive)
            {
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "--recursive not specified; omitting directory {}", path_from);
            }
            auto target_location = getTargetLocation(path_from, disk_to, path_to);

        String disk_name_from = config.getString("disk-from", config.getString("disk", "default"));
        String disk_name_to = config.getString("disk-to", config.getString("disk", "default"));

        const String & path_from = command_arguments[0];
        const String & path_to = command_arguments[1];

        DiskPtr disk_from = disk_selector->get(disk_name_from);
        DiskPtr disk_to = disk_selector->get(disk_name_to);

        String relative_path_from = validatePathAndGetAsRelative(path_from);
        String relative_path_to = validatePathAndGetAsRelative(path_to);

        disk_from->copyDirectoryContent(relative_path_from, disk_to, relative_path_to, /* read_settings= */ {}, /* write_settings= */ {}, /* cancellation_hook= */ {});
            if (disk_to.getDisk()->isFile(target_location))
            {
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot overwrite non-directory {} with directory {}", path_to, target_location);
            }
            else if (!disk_to.getDisk()->exists(target_location))
            {
                disk_to.getDisk()->createDirectory(target_location);
            }
            disk_from.getDisk()->copyDirectoryContent(
                path_from,
                disk_to.getDisk(),
                target_location,
                /* read_settings= */ {},
                /* write_settings= */ {},
                /* cancellation_hook= */ {});
        }
    }
};

CommandPtr makeCommandCopy()
{
    return std::make_shared<DB::CommandCopy>();
}

std::unique_ptr <DB::ICommand> makeCommandCopy()
{
    return std::make_unique<DB::CommandCopy>();
}
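The new executeImpl dispatches on the kind of source path: a missing path is an error, a file is copied with copyFile unless that would clobber a directory, and a directory demands --recursive and goes through copyDirectoryContent into a freshly created target. A rough equivalent of that dispatch over std::filesystem (standard library only; none of this is the IDisk API):

#include <filesystem>
#include <iostream>
#include <stdexcept>

namespace fs = std::filesystem;

void copyPath(const fs::path & from, const fs::path & to, bool recursive)
{
    if (!fs::exists(from))
        throw std::runtime_error("cannot stat '" + from.string() + "': No such file or directory");

    if (fs::is_regular_file(from))
    {
        /// Overwriting an existing file is fine; overwriting a directory is not.
        if (fs::exists(to) && fs::is_directory(to))
            throw std::runtime_error("cannot overwrite directory '" + to.string() + "' with non-directory");
        fs::copy_file(from, to, fs::copy_options::overwrite_existing);
    }
    else if (fs::is_directory(from))
    {
        if (!recursive)
            throw std::runtime_error("--recursive not specified; omitting directory '" + from.string() + "'");
        fs::create_directories(to);
        fs::copy(from, to, fs::copy_options::recursive | fs::copy_options::overwrite_existing);
    }
}

int main()
{
    copyPath("data", "backup", /* recursive = */ true);
    std::cout << "done\n";
}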
30
programs/disks/CommandGetCurrentDiskAndPath.cpp
Normal file
@ -0,0 +1,30 @@
#include <Interpreters/Context.h>
#include <Common/TerminalSize.h>
#include "DisksApp.h"
#include "DisksClient.h"
#include "ICommand.h"

namespace DB
{

class CommandGetCurrentDiskAndPath final : public ICommand
{
public:
    explicit CommandGetCurrentDiskAndPath() : ICommand()
    {
        command_name = "current_disk_with_path";
        description = "Prints current disk and path (which coincide with the prompt)";
    }

    void executeImpl(const CommandLineOptions &, DisksClient & client) override
    {
        auto disk = client.getCurrentDiskWithPath();
        std::cout << "Disk: " << disk.getDisk()->getName() << "\nPath: " << disk.getCurrentPath() << std::endl;
    }
};

CommandPtr makeCommandGetCurrentDiskAndPath()
{
    return std::make_shared<DB::CommandGetCurrentDiskAndPath>();
}
}
43
programs/disks/CommandHelp.cpp
Normal file
@ -0,0 +1,43 @@
#include "DisksApp.h"
#include "ICommand.h"

#include <Interpreters/Context.h>
#include <Common/TerminalSize.h>

namespace DB
{

class CommandHelp final : public ICommand
{
public:
    explicit CommandHelp(const DisksApp & disks_app_) : disks_app(disks_app_)
    {
        command_name = "help";
        description = "Print help message about available commands";
        options_description.add_options()(
            "command", po::value<String>(), "A command to help with (optional, positional); if not specified, help lists all the commands");
        positional_options_description.add("command", 1);
    }

    void executeImpl(const CommandLineOptions & options, DisksClient & /*client*/) override
    {
        std::optional<String> command = getValueFromCommandLineOptionsWithOptional<String>(options, "command");
        if (command.has_value())
        {
            disks_app.printCommandHelpMessage(command.value());
        }
        else
        {
            disks_app.printAvailableCommandsHelpMessage();
        }
    }

    const DisksApp & disks_app;
};

CommandPtr makeCommandHelp(const DisksApp & disks_app)
{
    return std::make_shared<DB::CommandHelp>(disks_app);
}

}
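CommandHelp keeps a reference back to DisksApp so it can reuse the application's help printers; the name it is given is resolved by getCommandByName (shown later in DisksApp.cpp), which checks the alias table before the command map. The lookup, reduced to a self-contained sketch with made-up tables:

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

int main()
{
    std::map<std::string, std::string> aliases = {{"rm", "remove"}, {"ls", "list"}};
    std::map<std::string, std::string> command_descriptions
        = {{"remove", "Remove file or directory"}, {"list", "List files at path"}};

    auto getCommandByName = [&](const std::string & command) -> const std::string &
    {
        try
        {
            if (auto it = aliases.find(command); it != aliases.end())
                return command_descriptions.at(it->second); /// alias -> canonical name -> command
            return command_descriptions.at(command);
        }
        catch (const std::out_of_range &)
        {
            throw std::invalid_argument("The command `" + command + "` is unknown");
        }
    };

    std::cout << getCommandByName("rm") << '\n';   /// prints the description of `remove`
    std::cout << getCommandByName("list") << '\n';
}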
@ -1,14 +1,9 @@
#include "ICommand.h"
#include <Interpreters/Context.h>
#include "ICommand.h"

namespace DB
{

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}

class CommandLink final : public ICommand
{
public:
@ -16,42 +11,27 @@ public:
    {
        command_name = "link";
        description = "Create hardlink from `from_path` to `to_path`";
        usage = "link [OPTION]... <FROM_PATH> <TO_PATH>";
        options_description.add_options()(
            "path-from", po::value<String>(), "the path from which a hard link will be created (mandatory, positional)")(
            "path-to", po::value<String>(), "the path where a hard link will be created (mandatory, positional)");
        positional_options_description.add("path-from", 1);
        positional_options_description.add("path-to", 1);
    }

    void processOptions(
        Poco::Util::LayeredConfiguration &,
        po::variables_map &) const override
    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
    }
        auto disk = client.getCurrentDiskWithPath();

    void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> & disk_selector,
        Poco::Util::LayeredConfiguration & config) override
    {
        if (command_arguments.size() != 2)
        {
            printHelpMessage();
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
        }
        const String & path_from = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-from"));
        const String & path_to = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-to"));

        String disk_name = config.getString("disk", "default");

        const String & path_from = command_arguments[0];
        const String & path_to = command_arguments[1];

        DiskPtr disk = disk_selector->get(disk_name);

        String relative_path_from = validatePathAndGetAsRelative(path_from);
        String relative_path_to = validatePathAndGetAsRelative(path_to);

        disk->createHardLink(relative_path_from, relative_path_to);
        disk.getDisk()->createHardLink(path_from, path_to);
    }
};

CommandPtr makeCommandLink()
{
    return std::make_shared<DB::CommandLink>();
}

std::unique_ptr <DB::ICommand> makeCommandLink()
{
    return std::make_unique<DB::CommandLink>();
}
@ -1,98 +1,95 @@
#include "ICommand.h"
#include <Interpreters/Context.h>
#include <Common/TerminalSize.h>
#include "DisksApp.h"
#include "DisksClient.h"
#include "ICommand.h"

namespace DB
{

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}

class CommandList final : public ICommand
{
public:
    CommandList()
    explicit CommandList() : ICommand()
    {
        command_name = "list";
        command_option_description.emplace(createOptionsDescription("Allowed options", getTerminalWidth()));
        description = "List files at path[s]";
        usage = "list [OPTION]... <PATH>...";
        command_option_description->add_options()
            ("recursive", "recursively list all directories");
        options_description.add_options()("recursive", "recursively list the directory")("all", "show hidden files")(
            "path", po::value<String>(), "the path of listing (mandatory, positional)");
        positional_options_description.add("path", 1);
    }

    void processOptions(
        Poco::Util::LayeredConfiguration & config,
        po::variables_map & options) const override
    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        if (options.count("recursive"))
            config.setBool("recursive", true);
    }

    void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> & disk_selector,
        Poco::Util::LayeredConfiguration & config) override
    {
        if (command_arguments.size() != 1)
        {
            printHelpMessage();
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
        }

        String disk_name = config.getString("disk", "default");

        const String & path = command_arguments[0];

        DiskPtr disk = disk_selector->get(disk_name);

        String relative_path = validatePathAndGetAsRelative(path);

        bool recursive = config.getBool("recursive", false);
        bool recursive = options.count("recursive");
        bool show_hidden = options.count("all");
        auto disk = client.getCurrentDiskWithPath();
        String path = getValueFromCommandLineOptionsWithDefault<String>(options, "path", ".");

        if (recursive)
            listRecursive(disk, relative_path);
            listRecursive(disk, path, show_hidden);
        else
            list(disk, relative_path);
            list(disk, path, show_hidden);
    }

private:
    static void list(const DiskPtr & disk, const std::string & relative_path)
    static void list(const DiskWithPath & disk, const std::string & path, bool show_hidden)
    {
        std::vector<String> file_names;
        disk->listFiles(relative_path, file_names);
        std::vector<String> file_names = disk.listAllFilesByPath(path);
        std::vector<String> selected_and_sorted_file_names{};

        for (const auto & file_name : file_names)
            std::cout << file_name << '\n';
            if (show_hidden || (!file_name.starts_with('.')))
                selected_and_sorted_file_names.push_back(file_name);

        std::sort(selected_and_sorted_file_names.begin(), selected_and_sorted_file_names.end());
        for (const auto & file_name : selected_and_sorted_file_names)
        {
            std::cout << file_name << "\n";
        }
    }

    static void listRecursive(const DiskPtr & disk, const std::string & relative_path)
    static void listRecursive(const DiskWithPath & disk, const std::string & relative_path, bool show_hidden)
    {
        std::vector<String> file_names;
        disk->listFiles(relative_path, file_names);
        std::vector<String> file_names = disk.listAllFilesByPath(relative_path);
        std::vector<String> selected_and_sorted_file_names{};

        std::cout << relative_path << ":\n";

        if (!file_names.empty())
        {
            for (const auto & file_name : file_names)
                std::cout << file_name << '\n';
            std::cout << "\n";
        }

        for (const auto & file_name : file_names)
            if (show_hidden || (!file_name.starts_with('.')))
                selected_and_sorted_file_names.push_back(file_name);

        std::sort(selected_and_sorted_file_names.begin(), selected_and_sorted_file_names.end());
        for (const auto & file_name : selected_and_sorted_file_names)
        {
            auto path = relative_path.empty() ? file_name : (relative_path + "/" + file_name);
            if (disk->isDirectory(path))
                listRecursive(disk, path);
            std::cout << file_name << "\n";
        }
        std::cout << "\n";

        for (const auto & file_name : selected_and_sorted_file_names)
        {
            auto path = [&]() -> String
            {
                if (relative_path.ends_with("/"))
                {
                    return relative_path + file_name;
                }
                else
                {
                    return relative_path + "/" + file_name;
                }
            }();
            if (disk.isDirectory(path))
            {
                listRecursive(disk, path, show_hidden);
            }
        }
    }
};
}

std::unique_ptr <DB::ICommand> makeCommandList()
CommandPtr makeCommandList()
{
    return std::make_unique<DB::CommandList>();
    return std::make_shared<DB::CommandList>();
}
}
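The listing helpers now share one policy: drop dot-files unless --all was passed, sort what is left, print it, and for the recursive variant descend into subdirectories while joining paths without doubling a trailing slash. The same algorithm over std::filesystem, as a sketch (an on-disk directory tree stands in for an IDisk):

#include <algorithm>
#include <filesystem>
#include <iostream>
#include <string>
#include <vector>

namespace fs = std::filesystem;

std::vector<std::string> listSorted(const fs::path & dir, bool show_hidden)
{
    std::vector<std::string> names;
    for (const auto & entry : fs::directory_iterator(dir))
    {
        std::string name = entry.path().filename().string();
        if (show_hidden || !name.starts_with('.'))
            names.push_back(name);
    }
    std::sort(names.begin(), names.end());
    return names;
}

void listRecursive(const fs::path & dir, bool show_hidden)
{
    std::cout << dir.string() << ":\n";
    auto names = listSorted(dir, show_hidden);
    for (const auto & name : names)
        std::cout << name << '\n';
    std::cout << '\n';
    for (const auto & name : names)
        if (fs::is_directory(dir / name))     /// descend only into directories
            listRecursive(dir / name, show_hidden);
}

int main()
{
    listRecursive(".", /* show_hidden = */ false);
}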
@ -1,68 +1,40 @@
#include "ICommand.h"
#include <algorithm>
#include <Interpreters/Context.h>
#include <Common/TerminalSize.h>
#include "DisksClient.h"
#include "ICommand.h"

namespace DB
{

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}

class CommandListDisks final : public ICommand
{
public:
    CommandListDisks()
    explicit CommandListDisks() : ICommand()
    {
        command_name = "list-disks";
        description = "List disks names";
        usage = "list-disks [OPTION]";
        description = "Lists all available disks";
    }

    void processOptions(
        Poco::Util::LayeredConfiguration &,
        po::variables_map &) const override
    {}

    void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> &,
        Poco::Util::LayeredConfiguration & config) override
    void executeImpl(const CommandLineOptions &, DisksClient & client) override
    {
        if (!command_arguments.empty())
        std::vector<String> sorted_and_selected{};
        for (const auto & disk_name : client.getAllDiskNames())
        {
            printHelpMessage();
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
            sorted_and_selected.push_back(disk_name + ":" + client.getDiskWithPath(disk_name).getAbsolutePath(""));
        }

        constexpr auto config_prefix = "storage_configuration.disks";
        constexpr auto default_disk_name = "default";

        Poco::Util::AbstractConfiguration::Keys keys;
        config.keys(config_prefix, keys);

        bool has_default_disk = false;

        /// For the output to be ordered
        std::set<String> disks;

        for (const auto & disk_name : keys)
        std::sort(sorted_and_selected.begin(), sorted_and_selected.end());
        for (const auto & disk_name : sorted_and_selected)
        {
            if (disk_name == default_disk_name)
                has_default_disk = true;
            disks.insert(disk_name);
            std::cout << disk_name << "\n";
        }

        if (!has_default_disk)
            disks.insert(default_disk_name);

        for (const auto & disk : disks)
            std::cout << disk << '\n';
    }
};
}

std::unique_ptr <DB::ICommand> makeCommandListDisks()
private:
};

CommandPtr makeCommandListDisks()
{
    return std::make_unique<DB::CommandListDisks>();
    return std::make_shared<DB::CommandListDisks>();
}
}
@ -6,61 +6,35 @@
namespace DB
{

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}

class CommandMkDir final : public ICommand
{
public:
    CommandMkDir()
    {
        command_name = "mkdir";
        command_option_description.emplace(createOptionsDescription("Allowed options", getTerminalWidth()));
        description = "Create a directory";
        usage = "mkdir [OPTION]... <PATH>";
        command_option_description->add_options()
            ("recursive", "recursively create directories");
        description = "Creates a directory";
        options_description.add_options()("parents", "recursively create directories")(
            "path", po::value<String>(), "the path on which directory should be created (mandatory, positional)");
        positional_options_description.add("path", 1);
    }

    void processOptions(
        Poco::Util::LayeredConfiguration & config,
        po::variables_map & options) const override
    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        if (options.count("recursive"))
            config.setBool("recursive", true);
    }
        bool recursive = options.count("parents");
        auto disk = client.getCurrentDiskWithPath();

    void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> & disk_selector,
        Poco::Util::LayeredConfiguration & config) override
    {
        if (command_arguments.size() != 1)
        {
            printHelpMessage();
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
        }

        String disk_name = config.getString("disk", "default");

        const String & path = command_arguments[0];

        DiskPtr disk = disk_selector->get(disk_name);

        String relative_path = validatePathAndGetAsRelative(path);
        bool recursive = config.getBool("recursive", false);
        String path = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path"));

        if (recursive)
            disk->createDirectories(relative_path);
            disk.getDisk()->createDirectories(path);
        else
            disk->createDirectory(relative_path);
            disk.getDisk()->createDirectory(path);
    }
};

CommandPtr makeCommandMkDir()
{
    return std::make_shared<DB::CommandMkDir>();
}

std::unique_ptr <DB::ICommand> makeCommandMkDir()
{
    return std::make_unique<DB::CommandMkDir>();
}
@ -1,5 +1,5 @@
#include "ICommand.h"
#include <Interpreters/Context.h>
#include "ICommand.h"

namespace DB
{
@ -9,6 +9,7 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}

class CommandMove final : public ICommand
{
public:
@ -16,44 +17,62 @@ public:
    {
        command_name = "move";
        description = "Move file or directory from `from_path` to `to_path`";
        usage = "move [OPTION]... <FROM_PATH> <TO_PATH>";
        options_description.add_options()("path-from", po::value<String>(), "path from which we move (mandatory, positional)")(
            "path-to", po::value<String>(), "path to which we move (mandatory, positional)");
        positional_options_description.add("path-from", 1);
        positional_options_description.add("path-to", 1);
    }

    void processOptions(
        Poco::Util::LayeredConfiguration &,
        po::variables_map &) const override
    {}

    void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> & disk_selector,
        Poco::Util::LayeredConfiguration & config) override
    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        if (command_arguments.size() != 2)
        auto disk = client.getCurrentDiskWithPath();

        String path_from = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-from"));
        String path_to = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-to"));

        if (disk.getDisk()->isFile(path_from))
        {
            printHelpMessage();
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
            disk.getDisk()->moveFile(path_from, path_to);
        }
        else if (disk.getDisk()->isDirectory(path_from))
        {
            auto target_location = getTargetLocation(path_from, disk, path_to);
            if (!disk.getDisk()->exists(target_location))
            {
                disk.getDisk()->createDirectory(target_location);
                disk.getDisk()->moveDirectory(path_from, target_location);
            }
            else
            {
                if (disk.getDisk()->isFile(target_location))
                {
                    throw Exception(
                        ErrorCodes::BAD_ARGUMENTS, "cannot overwrite non-directory '{}' with directory '{}'", target_location, path_from);
                }
                if (!disk.getDisk()->isDirectoryEmpty(target_location))
                {
                    throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot move '{}' to '{}': Directory not empty", path_from, target_location);
                }
                else
                {
                    disk.getDisk()->moveDirectory(path_from, target_location);
                }
            }
        }
        else if (!disk.getDisk()->exists(path_from))
        {
            throw Exception(
                ErrorCodes::BAD_ARGUMENTS,
                "cannot stat '{}' on disk '{}': No such file or directory",
                path_from,
                disk.getDisk()->getName());
        }

        String disk_name = config.getString("disk", "default");

        const String & path_from = command_arguments[0];
        const String & path_to = command_arguments[1];

        DiskPtr disk = disk_selector->get(disk_name);

        String relative_path_from = validatePathAndGetAsRelative(path_from);
        String relative_path_to = validatePathAndGetAsRelative(path_to);

        if (disk->isFile(relative_path_from))
            disk->moveFile(relative_path_from, relative_path_to);
        else
            disk->moveDirectory(relative_path_from, relative_path_to);
    }
};

CommandPtr makeCommandMove()
{
    return std::make_shared<DB::CommandMove>();
}

std::unique_ptr <DB::ICommand> makeCommandMove()
{
    return std::make_unique<DB::CommandMove>();
}
@ -1,78 +1,52 @@
#include "ICommand.h"
#include <Interpreters/Context.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/copyData.h>
#include <Interpreters/Context.h>
#include <Common/TerminalSize.h>
#include "ICommand.h"

namespace DB
{

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}

class CommandRead final : public ICommand
{
public:
    CommandRead()
    {
        command_name = "read";
        command_option_description.emplace(createOptionsDescription("Allowed options", getTerminalWidth()));
        description = "Read a file from `FROM_PATH` to `TO_PATH`";
        usage = "read [OPTION]... <FROM_PATH> [<TO_PATH>]";
        command_option_description->add_options()
            ("output", po::value<String>(), "file to which we are reading, defaults to `stdout`");
        description = "Read a file from `path-from` to `path-to`";
        options_description.add_options()("path-from", po::value<String>(), "file from which we are reading (mandatory, positional)")(
            "path-to", po::value<String>(), "file to which we are writing, defaults to `stdout`");
        positional_options_description.add("path-from", 1);
    }

    void processOptions(
        Poco::Util::LayeredConfiguration & config,
        po::variables_map & options) const override
    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        if (options.count("output"))
            config.setString("output", options["output"].as<String>());
    }
        auto disk = client.getCurrentDiskWithPath();
        String path_from = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-from"));
        std::optional<String> path_to = getValueFromCommandLineOptionsWithOptional<String>(options, "path-to");

    void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> & disk_selector,
        Poco::Util::LayeredConfiguration & config) override
    {
        if (command_arguments.size() != 1)
        auto in = disk.getDisk()->readFile(path_from);
        std::unique_ptr<WriteBufferFromFileBase> out = {};
        if (path_to.has_value())
        {
            printHelpMessage();
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
        }

        String disk_name = config.getString("disk", "default");

        DiskPtr disk = disk_selector->get(disk_name);

        String relative_path = validatePathAndGetAsRelative(command_arguments[0]);

        String path_output = config.getString("output", "");

        if (!path_output.empty())
        {
            String relative_path_output = validatePathAndGetAsRelative(path_output);

            auto in = disk->readFile(relative_path);
            auto out = disk->writeFile(relative_path_output);
            String relative_path_to = disk.getRelativeFromRoot(path_to.value());
            out = disk.getDisk()->writeFile(relative_path_to);
            copyData(*in, *out);
            out->finalize();
        }
        else
        {
            auto in = disk->readFile(relative_path);
            std::unique_ptr<WriteBufferFromFileBase> out = std::make_unique<WriteBufferFromFileDescriptor>(STDOUT_FILENO);
            out = std::make_unique<WriteBufferFromFileDescriptor>(STDOUT_FILENO);
            copyData(*in, *out);
            out->write('\n');
        }
        out->finalize();
    }
};

CommandPtr makeCommandRead()
{
    return std::make_shared<DB::CommandRead>();
}

std::unique_ptr <DB::ICommand> makeCommandRead()
{
    return std::make_unique<DB::CommandRead>();
}
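The control flow worth noting here: out is declared once, pointed either at a disk file or at stdout, and a single copyData/finalize tail serves both branches (the stdout branch additionally appends a newline). A standard-streams sketch of that shape, with flush standing in for ClickHouse's finalize:

#include <fstream>
#include <iostream>
#include <optional>
#include <string>

int main(int argc, char ** argv)
{
    std::optional<std::string> path_to;
    if (argc > 1)
        path_to = argv[1];

    /// Decide once where the bytes go; the copy below doesn't care.
    std::ofstream file_out;
    std::ostream * out = &std::cout;
    if (path_to)
    {
        file_out.open(*path_to, std::ios::binary);
        out = &file_out;
    }

    *out << std::cin.rdbuf(); /// analogous to copyData(*in, *out)
    out->flush();             /// analogous to out->finalize()
}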
@ -1,5 +1,6 @@
#include "ICommand.h"
#include <Interpreters/Context.h>
#include "Common/Exception.h"
#include "ICommand.h"

namespace DB
{
@ -9,46 +10,49 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}

class CommandRemove final : public ICommand
{
public:
    CommandRemove()
    {
        command_name = "remove";
        description = "Remove file or directory with all children. Throws exception if file doesn't exist.\nPath should be in format './' or './path' or 'path'";
        usage = "remove [OPTION]... <PATH>";
        description = "Remove file or directory. Throws exception if file doesn't exist";
        options_description.add_options()("path", po::value<String>(), "path that is going to be deleted (mandatory, positional)")(
            "recursive,r", "recursively removes the directory (required to remove a directory)");
        positional_options_description.add("path", 1);
    }

    void processOptions(
        Poco::Util::LayeredConfiguration &,
        po::variables_map &) const override
    {}

    void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> & disk_selector,
        Poco::Util::LayeredConfiguration & config) override
    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        if (command_arguments.size() != 1)
        auto disk = client.getCurrentDiskWithPath();
        const String & path = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path"));
        bool recursive = options.count("recursive");
        if (!disk.getDisk()->exists(path))
        {
            printHelpMessage();
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} on disk {} doesn't exist", path, disk.getDisk()->getName());
        }
        else if (disk.getDisk()->isDirectory(path))
        {
            if (!recursive)
            {
                throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot remove '{}': Is a directory", path);
            }
            else
            {
                disk.getDisk()->removeRecursive(path);
            }
        }
        else
        {
            disk.getDisk()->removeFileIfExists(path);
        }

        String disk_name = config.getString("disk", "default");

        const String & path = command_arguments[0];

        DiskPtr disk = disk_selector->get(disk_name);

        String relative_path = validatePathAndGetAsRelative(path);

        disk->removeRecursive(relative_path);
    }
};

CommandPtr makeCommandRemove()
{
    return std::make_shared<DB::CommandRemove>();
}

std::unique_ptr <DB::ICommand> makeCommandRemove()
{
    return std::make_unique<DB::CommandRemove>();
}
35
programs/disks/CommandSwitchDisk.cpp
Normal file
@ -0,0 +1,35 @@
#include <optional>
#include <Interpreters/Context.h>
#include <Common/TerminalSize.h>
#include "DisksApp.h"
#include "ICommand.h"

namespace DB
{

class CommandSwitchDisk final : public ICommand
{
public:
    explicit CommandSwitchDisk() : ICommand()
    {
        command_name = "switch-disk";
        description = "Switch disk (makes sense only in interactive mode)";
        options_description.add_options()("disk", po::value<String>(), "the disk to switch to (mandatory, positional)")(
            "path", po::value<String>(), "the path to switch on the disk");
        positional_options_description.add("disk", 1);
    }

    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        String disk = getValueFromCommandLineOptions<String>(options, "disk");
        std::optional<String> path = getValueFromCommandLineOptionsWithOptional<String>(options, "path");

        client.switchToDisk(disk, path);
    }
};

CommandPtr makeCommandSwitchDisk()
{
    return std::make_shared<DB::CommandSwitchDisk>();
}
}
34
programs/disks/CommandTouch.cpp
Normal file
@ -0,0 +1,34 @@
#include <Interpreters/Context.h>
#include <Common/TerminalSize.h>
#include "DisksApp.h"
#include "DisksClient.h"
#include "ICommand.h"

namespace DB
{

class CommandTouch final : public ICommand
{
public:
    explicit CommandTouch() : ICommand()
    {
        command_name = "touch";
        description = "Create a file by path";
        options_description.add_options()("path", po::value<String>(), "the path of the file to create (mandatory, positional)");
        positional_options_description.add("path", 1);
    }

    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        auto disk = client.getCurrentDiskWithPath();
        String path = getValueFromCommandLineOptionsThrow<String>(options, "path");

        disk.getDisk()->createFile(disk.getRelativeFromRoot(path));
    }
};

CommandPtr makeCommandTouch()
{
    return std::make_shared<DB::CommandTouch>();
}
}
@ -1,79 +1,57 @@
#include "ICommand.h"
#include <Interpreters/Context.h>
#include "ICommand.h"

#include <Common/TerminalSize.h>
#include <IO/ReadBufferFromFile.h>
#include <IO/WriteBufferFromFile.h>
#include <IO/copyData.h>
#include <Common/TerminalSize.h>

namespace DB
{

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}

class CommandWrite final : public ICommand
{
public:
    CommandWrite()
    {
        command_name = "write";
        command_option_description.emplace(createOptionsDescription("Allowed options", getTerminalWidth()));
        description = "Write a file from `FROM_PATH` to `TO_PATH`";
        usage = "write [OPTION]... [<FROM_PATH>] <TO_PATH>";
        command_option_description->add_options()
            ("input", po::value<String>(), "file from which we are reading, defaults to `stdin`");
        description = "Write a file from `path-from` to `path-to`";
        options_description.add_options()("path-from", po::value<String>(), "file from which we are reading, defaults to `stdin` (input from `stdin` is finished by Ctrl+D)")(
            "path-to", po::value<String>(), "file to which we are writing (mandatory, positional)");
        positional_options_description.add("path-to", 1);
    }

    void processOptions(
        Poco::Util::LayeredConfiguration & config,
        po::variables_map & options) const override

    void executeImpl(const CommandLineOptions & options, DisksClient & client) override
    {
        if (options.count("input"))
            config.setString("input", options["input"].as<String>());
    }
        auto disk = client.getCurrentDiskWithPath();

    void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> & disk_selector,
        Poco::Util::LayeredConfiguration & config) override
    {
        if (command_arguments.size() != 1)
        std::optional<String> path_from = getValueFromCommandLineOptionsWithOptional<String>(options, "path-from");

        String path_to = disk.getRelativeFromRoot(getValueFromCommandLineOptionsThrow<String>(options, "path-to"));

        auto in = [&]() -> std::unique_ptr<ReadBufferFromFileBase>
        {
            printHelpMessage();
            throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
        }
            if (!path_from.has_value())
            {
                return std::make_unique<ReadBufferFromFileDescriptor>(STDIN_FILENO);
            }
            else
            {
                String relative_path_from = disk.getRelativeFromRoot(path_from.value());
                return disk.getDisk()->readFile(relative_path_from);
            }
        }();

        String disk_name = config.getString("disk", "default");

        const String & path = command_arguments[0];

        DiskPtr disk = disk_selector->get(disk_name);

        String relative_path = validatePathAndGetAsRelative(path);

        String path_input = config.getString("input", "");
        std::unique_ptr<ReadBufferFromFileBase> in;
        if (path_input.empty())
        {
            in = std::make_unique<ReadBufferFromFileDescriptor>(STDIN_FILENO);
        }
        else
        {
            String relative_path_input = validatePathAndGetAsRelative(path_input);
            in = disk->readFile(relative_path_input);
        }

        auto out = disk->writeFile(relative_path);
        auto out = disk.getDisk()->writeFile(path_to);
        copyData(*in, *out);
        out->finalize();
    }
};

CommandPtr makeCommandWrite()
{
    return std::make_shared<DB::CommandWrite>();
}

std::unique_ptr <DB::ICommand> makeCommandWrite()
{
    return std::make_unique<DB::CommandWrite>();
}
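write is read in reverse, and it shows the immediately-invoked-lambda idiom the new code favors: in is initialized by one expression that picks stdin or a disk file, so it never exists in a half-initialized state. The idiom with standard streams (stream types stand in for ClickHouse read buffers; a null pointer signals "use std::cin directly", since std::cin cannot be owned):

#include <fstream>
#include <iostream>
#include <memory>
#include <optional>
#include <string>

int main(int argc, char ** argv)
{
    std::optional<std::string> path_from;
    if (argc > 1)
        path_from = argv[1];

    /// Immediately invoked lambda: pick the source in one expression.
    auto in = [&]() -> std::unique_ptr<std::istream>
    {
        if (!path_from)
            return nullptr; /// fall back to std::cin below
        return std::make_unique<std::ifstream>(*path_from, std::ios::binary);
    }();

    std::istream & source = in ? *in : std::cin;
    std::cout << source.rdbuf();
}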
@ -1,11 +1,22 @@
#include "DisksApp.h"
#include <Client/ClientBase.h>
#include <Client/ReplxxLineReader.h>
#include "Common/Exception.h"
#include "Common/filesystemHelpers.h"
#include <Common/Config/ConfigProcessor.h>
#include "DisksClient.h"
#include "ICommand.h"
#include "ICommand_fwd.h"

#include <cstring>
#include <filesystem>
#include <memory>
#include <optional>

#include <Disks/registerDisks.h>

#include <Common/TerminalSize.h>
#include <Formats/registerFormats.h>

#include <Common/TerminalSize.h>

namespace DB
{
@ -13,74 +24,289 @@ namespace DB
namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
extern const int LOGICAL_ERROR;
};

LineReader::Patterns DisksApp::query_extenders = {"\\"};
LineReader::Patterns DisksApp::query_delimiters = {""};
String DisksApp::word_break_characters = " \t\v\f\a\b\r\n";

CommandPtr DisksApp::getCommandByName(const String & command) const
{
    try
    {
        if (auto it = aliases.find(command); it != aliases.end())
            return command_descriptions.at(it->second);

        return command_descriptions.at(command);
    }
    catch (std::out_of_range &)
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "The command `{}` is unknown", command);
    }
}

size_t DisksApp::findCommandPos(std::vector<String> & common_arguments)
std::vector<String> DisksApp::getEmptyCompletion(String command_name) const
{
    for (size_t i = 0; i < common_arguments.size(); i++)
        if (supported_commands.contains(common_arguments[i]))
            return i + 1;
    return common_arguments.size();
    auto command_ptr = command_descriptions.at(command_name);
    std::vector<String> answer{};
    if (multidisk_commands.contains(command_ptr->command_name))
    {
        answer = client->getAllFilesByPatternFromAllDisks("");
    }
    else
    {
        answer = client->getCurrentDiskWithPath().getAllFilesByPattern("");
    }
    for (const auto & disk_name : client->getAllDiskNames())
    {
        answer.push_back(disk_name);
    }
    for (const auto & option : command_ptr->options_description.options())
    {
        answer.push_back("--" + option->long_name());
    }
    if (command_name == "help")
    {
        for (const auto & [current_command_name, description] : command_descriptions)
        {
            answer.push_back(current_command_name);
        }
    }
    std::sort(answer.begin(), answer.end());
    return answer;
}

void DisksApp::printHelpMessage(ProgramOptionsDescription & command_option_description)
std::vector<String> DisksApp::getCommandsToComplete(const String & command_prefix) const
{
    std::optional<ProgramOptionsDescription> help_description =
        createOptionsDescription("Help Message for clickhouse-disks", getTerminalWidth());

    help_description->add(command_option_description);

    std::cout << "ClickHouse disk management tool\n";
    std::cout << "Usage: ./clickhouse-disks [OPTION]\n";
    std::cout << "clickhouse-disks\n\n";

    for (const auto & current_command : supported_commands)
        std::cout << command_descriptions[current_command]->command_name
                  << "\t"
                  << command_descriptions[current_command]->description
                  << "\n\n";

    std::cout << command_option_description << '\n';
    std::vector<String> answer{};
    for (const auto & [word, _] : command_descriptions)
    {
        if (word.starts_with(command_prefix))
        {
            answer.push_back(word);
        }
    }
    if (!answer.empty())
    {
        std::sort(answer.begin(), answer.end());
        return answer;
    }
    for (const auto & [word, _] : aliases)
    {
        if (word.starts_with(command_prefix))
        {
            answer.push_back(word);
        }
    }
    if (!answer.empty())
    {
        std::sort(answer.begin(), answer.end());
        return answer;
    }
    return {command_prefix};
}
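getCommandsToComplete prefers exact command names over aliases and only echoes the prefix back when neither table matches, so unknown input is left untouched for the user to finish. Its core as a standalone sketch (the tables are invented for the example):

#include <algorithm>
#include <iostream>
#include <map>
#include <string>
#include <vector>

std::vector<std::string> complete(
    const std::map<std::string, std::string> & commands,
    const std::map<std::string, std::string> & aliases,
    const std::string & prefix)
{
    auto matches = [&](const auto & table)
    {
        std::vector<std::string> out;
        for (const auto & [word, _] : table)
            if (word.starts_with(prefix))
                out.push_back(word);
        std::sort(out.begin(), out.end());
        return out;
    };

    if (auto hits = matches(commands); !hits.empty())
        return hits;     /// command names win
    if (auto hits = matches(aliases); !hits.empty())
        return hits;     /// then aliases
    return {prefix};     /// nothing matched: leave the input as-is
}

int main()
{
    std::map<std::string, std::string> commands = {{"link", ""}, {"list", ""}, {"list-disks", ""}};
    std::map<std::string, std::string> aliases = {{"ls", "list"}};
    for (const auto & word : complete(commands, aliases, "li"))
        std::cout << word << '\n';
}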
String DisksApp::getDefaultConfigFileName()
std::vector<String> DisksApp::getCompletions(const String & prefix) const
{
    return "/etc/clickhouse-server/config.xml";
    auto arguments = po::split_unix(prefix, word_break_characters);
    if (arguments.empty())
    {
        return {};
    }
    if (word_break_characters.contains(prefix.back()))
    {
        CommandPtr command;
        try
        {
            command = getCommandByName(arguments[0]);
        }
        catch (...)
        {
            return {arguments.back()};
        }
        return getEmptyCompletion(command->command_name);
    }
    else if (arguments.size() == 1)
    {
        String command_prefix = arguments[0];
        return getCommandsToComplete(command_prefix);
    }
    else
    {
        String last_token = arguments.back();
        CommandPtr command;
        try
        {
            command = getCommandByName(arguments[0]);
        }
        catch (...)
        {
            return {last_token};
        }
        std::vector<String> answer = {};
        if (command->command_name == "help")
        {
            return getCommandsToComplete(last_token);
        }
        else
        {
            answer = [&]() -> std::vector<String>
            {
                if (multidisk_commands.contains(command->command_name))
                {
                    return client->getAllFilesByPatternFromAllDisks(last_token);
                }
                else
                {
                    return client->getCurrentDiskWithPath().getAllFilesByPattern(last_token);
                }
            }();

            for (const auto & disk_name : client->getAllDiskNames())
            {
                if (disk_name.starts_with(last_token))
                {
                    answer.push_back(disk_name);
                }
            }
            for (const auto & option : command->options_description.options())
            {
                String option_sign = "--" + option->long_name();
                if (option_sign.starts_with(last_token))
                {
                    answer.push_back(option_sign);
                }
            }
        }
        if (!answer.empty())
        {
            std::sort(answer.begin(), answer.end());
            return answer;
        }
        else
        {
            return {last_token};
        }
    }
}

void DisksApp::addOptions(
    ProgramOptionsDescription & options_description_,
    boost::program_options::positional_options_description & positional_options_description
)
bool DisksApp::processQueryText(const String & text)
{
    options_description_.add_options()
        ("help,h", "Print common help message")
        ("config-file,C", po::value<String>(), "Set config file")
        ("disk", po::value<String>(), "Set disk name")
        ("command_name", po::value<String>(), "Name for command to do")
        ("save-logs", "Save logs to a file")
        ("log-level", po::value<String>(), "Logging level")
        ;
    if (text.find_first_not_of(word_break_characters) == std::string::npos)
    {
        return true;
    }
    if (exit_strings.find(text) != exit_strings.end())
        return false;
    CommandPtr command;
    try
    {
        auto arguments = po::split_unix(text, word_break_characters);
        command = getCommandByName(arguments[0]);
        arguments.erase(arguments.begin());
        command->execute(arguments, *client);
    }
    catch (DB::Exception & err)
    {
        int code = getCurrentExceptionCode();
        if (code == ErrorCodes::LOGICAL_ERROR)
        {
            throw std::move(err);
        }
        else if (code == ErrorCodes::BAD_ARGUMENTS)
        {
            std::cerr << err.message() << "\n"
                      << "\n";
            if (command.get())
            {
                std::cerr << "COMMAND: " << command->command_name << "\n";
                std::cerr << command->options_description << "\n";
            }
            else
            {
                printAvailableCommandsHelpMessage();
            }
        }
        else
        {
            std::cerr << err.message() << "\n";
        }
    }
    catch (std::exception & err)
    {
        std::cerr << err.what() << "\n";
    }

    positional_options_description.add("command_name", 1);
    return true;
}
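processQueryText sets the REPL's error policy: blank input and handled errors keep the loop alive, exit words end it, BAD_ARGUMENTS prints the offending command's options, and LOGICAL_ERROR is rethrown because it signals a bug rather than user error. A skeleton of that policy (the exit words and the failing "boom" command are stand-ins invented for the example):

#include <iostream>
#include <set>
#include <stdexcept>
#include <string>

/// Returns false only when the user asked to leave; recoverable errors are
/// reported and the loop continues.
bool processLine(const std::string & text, const std::set<std::string> & exit_strings)
{
    if (text.find_first_not_of(" \t") == std::string::npos)
        return true;                      /// blank line: keep going
    if (exit_strings.contains(text))
        return false;                     /// `exit` / `quit`: stop the loop
    try
    {
        if (text == "boom")
            throw std::invalid_argument("Bad arguments");
        std::cout << "executed: " << text << '\n';
    }
    catch (const std::invalid_argument & e)
    {
        std::cerr << e.what() << '\n';    /// recoverable: report and continue
    }
    return true;
}

int main()
{
    const std::set<std::string> exit_strings = {"exit", "quit", "q"};
    for (std::string line; std::getline(std::cin, line);)
        if (!processLine(line, exit_strings))
            break;
}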
    supported_commands = {"list-disks", "list", "move", "remove", "link", "copy", "write", "read", "mkdir"};
#ifdef CLICKHOUSE_CLOUD
    supported_commands.insert("packed-io");
#endif
void DisksApp::runInteractiveReplxx()
{
    ReplxxLineReader lr(
        suggest,
        history_file,
        /* multiline= */ false,
        query_extenders,
        query_delimiters,
        word_break_characters.c_str(),
        /* highlighter_= */ {});
    lr.enableBracketedPaste();

    while (true)
    {
        DiskWithPath disk_with_path = client->getCurrentDiskWithPath();
        String prompt = "\x1b[1;34m" + disk_with_path.getDisk()->getName() + "\x1b[0m:" + "\x1b[1;31m" + disk_with_path.getCurrentPath()
            + "\x1b[0m$ ";

        auto input = lr.readLine(prompt, "\x1b[1;31m:-] \x1b[0m");
        if (input.empty())
            break;

        if (!processQueryText(input))
            break;
    }
}

void DisksApp::parseAndCheckOptions(
    const std::vector<String> & arguments, const ProgramOptionsDescription & options_description, CommandLineOptions & options)
{
    auto parser = po::command_line_parser(arguments).options(options_description).allow_unregistered();
    po::parsed_options parsed = parser.run();
    po::store(parsed, options);
}

void DisksApp::addOptions()
{
    options_description.add_options()("help,h", "Print common help message")("config-file,C", po::value<String>(), "Set config file")(
        "disk", po::value<String>(), "Set disk name")("save-logs", "Save logs to a file")(
        "log-level", po::value<String>(), "Logging level")("query,q", po::value<String>(), "Query for a non-interactive mode")(
        "test-mode", "Interactive interface in test regime");

    command_descriptions.emplace("list-disks", makeCommandListDisks());
    command_descriptions.emplace("copy", makeCommandCopy());
    command_descriptions.emplace("list", makeCommandList());
    command_descriptions.emplace("cd", makeCommandChangeDirectory());
    command_descriptions.emplace("move", makeCommandMove());
    command_descriptions.emplace("remove", makeCommandRemove());
    command_descriptions.emplace("link", makeCommandLink());
    command_descriptions.emplace("copy", makeCommandCopy());
    command_descriptions.emplace("write", makeCommandWrite());
    command_descriptions.emplace("read", makeCommandRead());
    command_descriptions.emplace("mkdir", makeCommandMkDir());
    command_descriptions.emplace("switch-disk", makeCommandSwitchDisk());
    command_descriptions.emplace("current_disk_with_path", makeCommandGetCurrentDiskAndPath());
    command_descriptions.emplace("touch", makeCommandTouch());
    command_descriptions.emplace("help", makeCommandHelp(*this));
#ifdef CLICKHOUSE_CLOUD
    command_descriptions.emplace("packed-io", makeCommandPackedIO());
#endif
    for (const auto & [command_name, command_ptr] : command_descriptions)
    {
        if (command_name != command_ptr->command_name)
        {
            throw Exception(ErrorCodes::LOGICAL_ERROR, "Command name inside map doesn't coincide with actual command name");
        }
    }
}
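addOptions finishes with a startup invariant: every key in command_descriptions must equal the command_name stored inside the command object, otherwise help, completion, and dispatch would disagree about what a command is called. The check in miniature (the Command struct and the two entries are illustrative only):

#include <iostream>
#include <map>
#include <memory>
#include <stdexcept>
#include <string>

struct Command
{
    std::string command_name;
};

int main()
{
    std::map<std::string, std::shared_ptr<Command>> command_descriptions;
    command_descriptions.emplace("list", std::make_shared<Command>(Command{"list"}));
    command_descriptions.emplace("remove", std::make_shared<Command>(Command{"remove"}));

    /// Startup invariant: the map key must equal the command's own name.
    for (const auto & [command_name, command_ptr] : command_descriptions)
        if (command_name != command_ptr->command_name)
            throw std::logic_error("Command name inside map doesn't coincide with actual command name");

    std::cout << "registry consistent, " << command_descriptions.size() << " commands\n";
}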
void DisksApp::processOptions()
@ -93,76 +319,122 @@ void DisksApp::processOptions()
    config().setBool("save-logs", true);
    if (options.count("log-level"))
        config().setString("log-level", options["log-level"].as<String>());
    if (options.count("test-mode"))
        config().setBool("test-mode", true);
    if (options.count("query"))
        query = std::optional{options["query"].as<String>()};
}

DisksApp::~DisksApp()

void DisksApp::printEntryHelpMessage() const
{
    if (global_context)
        global_context->shutdown();
    std::cout << "\x1b[1;33m ClickHouse disk management tool \x1b[0m \n";
    std::cout << options_description << '\n';
}

void DisksApp::init(std::vector<String> & common_arguments)

void DisksApp::printAvailableCommandsHelpMessage() const
{
    stopOptionsProcessing();
    std::cout << "\x1b[1;32mAvailable commands:\x1b[0m\n";
    std::vector<std::pair<String, CommandPtr>> commands_with_aliases_and_descriptions{};
    size_t maximal_command_length = 0;
    for (const auto & [command_name, command_ptr] : command_descriptions)
    {
        std::string command_string = getCommandLineWithAliases(command_ptr);
        maximal_command_length = std::max(maximal_command_length, command_string.size());
        commands_with_aliases_and_descriptions.push_back({std::move(command_string), command_descriptions.at(command_name)});
    }
    for (const auto & [command_with_aliases, command_ptr] : commands_with_aliases_and_descriptions)
    {
        std::cout << "\x1b[1;33m" << command_with_aliases << "\x1b[0m" << std::string(5, ' ') << "\x1b[1;33m" << command_ptr->description
                  << "\x1b[0m \n";
        std::cout << command_ptr->options_description;
        std::cout << std::endl;
    }
}

    ProgramOptionsDescription options_description{createOptionsDescription("clickhouse-disks", getTerminalWidth())};
void DisksApp::printCommandHelpMessage(CommandPtr command) const
{
    String command_name_with_aliases = getCommandLineWithAliases(command);
    std::cout << "\x1b[1;32m" << command_name_with_aliases << "\x1b[0m" << std::string(2, ' ') << command->description << "\n";
    std::cout << command->options_description;
}

    po::positional_options_description positional_options_description;
void DisksApp::printCommandHelpMessage(String command_name) const
{
    printCommandHelpMessage(getCommandByName(command_name));
}

    addOptions(options_description, positional_options_description);
String DisksApp::getCommandLineWithAliases(CommandPtr command) const
{
    String command_string = command->command_name;
    bool need_comma = false;
    for (const auto & [alias_name, alias_command_name] : aliases)
    {
        if (alias_command_name == command->command_name)
        {
            if (std::exchange(need_comma, true))
                command_string += ",";
            else
                command_string += "(";
            command_string += alias_name;
        }
    }
    command_string += (need_comma ? ")" : "");
    return command_string;
}

    size_t command_pos = findCommandPos(common_arguments);
    std::vector<String> global_flags(command_pos);
    command_arguments.resize(common_arguments.size() - command_pos);
    copy(common_arguments.begin(), common_arguments.begin() + command_pos, global_flags.begin());
    copy(common_arguments.begin() + command_pos, common_arguments.end(), command_arguments.begin());
void DisksApp::initializeHistoryFile()
{
    String home_path;
    const char * home_path_cstr = getenv("HOME"); // NOLINT(concurrency-mt-unsafe)
    if (home_path_cstr)
        home_path = home_path_cstr;
    if (config().has("history-file"))
        history_file = config().getString("history-file");
    else
        history_file = home_path + "/.disks-file-history";

    parseAndCheckOptions(options_description, positional_options_description, global_flags);
    if (!history_file.empty() && !fs::exists(history_file))
    {
        try
        {
            FS::createFile(history_file);
        }
        catch (const ErrnoException & e)
        {
            if (e.getErrno() != EEXIST)
                throw;
        }
    }
}
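getCommandLineWithAliases renders help entries like remove(rm,delete): std::exchange(need_comma, true) returns the old flag value while setting the new one, so the first matching alias opens the parenthesis and subsequent ones get commas. The trick in isolation (the alias table is invented for the example):

#include <iostream>
#include <map>
#include <string>
#include <utility>

/// Renders "remove(delete,rm)" the way getCommandLineWithAliases does.
std::string withAliases(const std::string & name, const std::map<std::string, std::string> & aliases)
{
    std::string out = name;
    bool need_comma = false;
    for (const auto & [alias_name, alias_target] : aliases)
    {
        if (alias_target != name)
            continue;
        /// exchange yields the previous flag: false on the first hit, true afterwards.
        out += std::exchange(need_comma, true) ? "," : "(";
        out += alias_name;
    }
    if (need_comma)
        out += ")";
    return out;
}

int main()
{
    std::map<std::string, std::string> aliases = {{"delete", "remove"}, {"ls", "list"}, {"rm", "remove"}};
    std::cout << withAliases("remove", aliases) << '\n'; /// remove(delete,rm)
    std::cout << withAliases("copy", aliases) << '\n';   /// copy
}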
|
||||
|
||||
void DisksApp::init(const std::vector<String> & common_arguments)
|
||||
{
|
||||
addOptions();
|
||||
parseAndCheckOptions(common_arguments, options_description, options);
|
||||
|
||||
po::notify(options);
|
||||
|
||||
if (options.count("help"))
|
||||
{
|
||||
printHelpMessage(options_description);
|
||||
printEntryHelpMessage();
|
||||
printAvailableCommandsHelpMessage();
|
||||
exit(0); // NOLINT(concurrency-mt-unsafe)
|
||||
}
|
||||
|
||||
if (!supported_commands.contains(command_name))
|
||||
{
|
||||
std::cerr << "Unknown command name: " << command_name << "\n";
|
||||
printHelpMessage(options_description);
|
||||
throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Bad Arguments");
|
||||
}
|
||||
|
||||
processOptions();
|
||||
}
|
||||
|
||||
void DisksApp::parseAndCheckOptions(
|
||||
ProgramOptionsDescription & options_description_,
|
||||
boost::program_options::positional_options_description & positional_options_description,
|
||||
std::vector<String> & arguments)
|
||||
String DisksApp::getDefaultConfigFileName()
|
||||
{
|
||||
auto parser = po::command_line_parser(arguments)
|
||||
.options(options_description_)
|
||||
.positional(positional_options_description)
|
||||
.allow_unregistered();
|
||||
|
||||
po::parsed_options parsed = parser.run();
|
||||
po::store(parsed, options);
|
||||
|
||||
auto positional_arguments = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::include_positional);
|
||||
for (const auto & arg : positional_arguments)
|
||||
{
|
||||
if (command_descriptions.contains(arg))
|
||||
{
|
||||
command_name = arg;
|
||||
break;
|
||||
}
|
||||
}
|
||||
return "/etc/clickhouse-server/config.xml";
|
||||
}
int DisksApp::main(const std::vector<String> & /*args*/)
{
    std::vector<std::string> keys;
    config().keys(keys);
    if (config().has("config-file") || fs::exists(getDefaultConfigFileName()))
    {
        String config_path = config().getString("config-file", getDefaultConfigFileName());
@ -173,9 +445,13 @@ int DisksApp::main(const std::vector<String> & /*args*/)
    }
    else
    {
        printEntryHelpMessage();
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "No config-file specified");
    }

    config().keys(keys);
    initializeHistoryFile();

    if (config().has("save-logs"))
    {
        auto log_level = config().getString("log-level", "trace");
@ -200,61 +476,68 @@ int DisksApp::main(const std::vector<String> & /*args*/)
    global_context->setApplicationType(Context::ApplicationType::DISKS);

    String path = config().getString("path", DBMS_DEFAULT_PATH);

    global_context->setPath(path);

    auto & command = command_descriptions[command_name];
    String main_disk = config().getString("disk", "default");

    auto command_options = command->getCommandOptions();
    std::vector<String> args;
    if (command_options)
    auto validator = [](const Poco::Util::AbstractConfiguration &, const std::string &, const std::string &) { return true; };

    constexpr auto config_prefix = "storage_configuration.disks";
    auto disk_selector = std::make_shared<DiskSelector>(std::unordered_set<String>{"cache", "encrypted"});
    disk_selector->initialize(config(), config_prefix, global_context, validator);

    std::vector<std::pair<DiskPtr, std::optional<String>>> disks_with_path;

    for (const auto & [_, disk_ptr] : disk_selector->getDisksMap())
    {
        auto parser = po::command_line_parser(command_arguments).options(*command_options).allow_unregistered();
        po::parsed_options parsed = parser.run();
        po::store(parsed, options);
        po::notify(options);
        disks_with_path.emplace_back(
            disk_ptr, (disk_ptr->getName() == "local") ? std::optional{fs::current_path().string()} : std::nullopt);
    }

        args = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::include_positional);
        command->processOptions(config(), options);

    client = std::make_unique<DisksClient>(std::move(disks_with_path), main_disk);

    suggest.setCompletionsCallback([&](const String & prefix, size_t /* prefix_length */) { return getCompletions(prefix); });

    if (!query.has_value())
    {
        runInteractive();
    }
    else
    {
        auto parser = po::command_line_parser(command_arguments).options({}).allow_unregistered();
        po::parsed_options parsed = parser.run();
        args = po::collect_unrecognized(parsed.options, po::collect_unrecognized_mode::include_positional);
        processQueryText(query.value());
    }

    std::unordered_set<std::string> disks
    {
        config().getString("disk", "default"),
        config().getString("disk-from", config().getString("disk", "default")),
        config().getString("disk-to", config().getString("disk", "default")),
    };

    auto validator = [&disks](
        const Poco::Util::AbstractConfiguration & config,
        const std::string & disk_config_prefix,
        const std::string & disk_name)
    {
        if (!disks.contains(disk_name))
            return false;

        const auto disk_type = config.getString(disk_config_prefix + ".type", "local");

        if (disk_type == "cache")
            throw Exception(ErrorCodes::BAD_ARGUMENTS, "Disk type 'cache' of disk {} is not supported by clickhouse-disks", disk_name);

        return true;
    };

    constexpr auto config_prefix = "storage_configuration.disks";
    auto disk_selector = std::make_shared<DiskSelector>();
    disk_selector->initialize(config(), config_prefix, global_context, validator);

    command->execute(args, disk_selector, config());

    return Application::EXIT_OK;
}

DisksApp::~DisksApp()
{
    client.reset(nullptr);
    if (global_context)
        global_context->shutdown();
}

void DisksApp::runInteractiveTestMode()
{
    for (String input; std::getline(std::cin, input);)
    {
        if (!processQueryText(input))
            break;

        std::cout << "\a\a\a\a" << std::endl;
        std::cerr << std::flush;
    }
}

void DisksApp::runInteractive()
{
    if (config().hasOption("test-mode"))
        runInteractiveTestMode();
    else
        runInteractiveReplxx();
}
}

int mainEntryClickHouseDisks(int argc, char ** argv)
@ -269,16 +552,16 @@ int mainEntryClickHouseDisks(int argc, char ** argv)
    catch (const DB::Exception & e)
    {
        std::cerr << DB::getExceptionMessage(e, false) << std::endl;
        return 1;
        return 0;
    }
    catch (const boost::program_options::error & e)
    {
        std::cerr << "Bad arguments: " << e.what() << std::endl;
        return DB::ErrorCodes::BAD_ARGUMENTS;
        return 0;
    }
    catch (...)
    {
        std::cerr << DB::getCurrentExceptionMessage(true) << std::endl;
        return 1;
        return 0;
    }
}
@ -1,61 +1,107 @@
#pragma once

#include <unordered_map>
#include <vector>
#include <Client/ReplxxLineReader.h>
#include <Loggers/Loggers.h>
#include "DisksClient.h"
#include "ICommand_fwd.h"

#include <Interpreters/Context.h>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/variables_map.hpp>
#include <Poco/Util/Application.h>

#include <boost/program_options.hpp>

namespace DB
{

class ICommand;
using CommandPtr = std::unique_ptr<ICommand>;

namespace po = boost::program_options;
using ProgramOptionsDescription = boost::program_options::options_description;
using CommandLineOptions = boost::program_options::variables_map;

class DisksApp : public Poco::Util::Application, public Loggers
class DisksApp : public Poco::Util::Application
{
public:
    DisksApp() = default;
    ~DisksApp() override;
    void addOptions();

    void init(std::vector<String> & common_arguments);

    int main(const std::vector<String> & args) override;

protected:
    static String getDefaultConfigFileName();

    void addOptions(
        ProgramOptionsDescription & options_description,
        boost::program_options::positional_options_description & positional_options_description);
    void processOptions();

    void printHelpMessage(ProgramOptionsDescription & command_option_description);
    bool processQueryText(const String & text);

    size_t findCommandPos(std::vector<String> & common_arguments);
    void init(const std::vector<String> & common_arguments);

    int main(const std::vector<String> & /*args*/) override;

    CommandPtr getCommandByName(const String & command) const;

    void initializeHistoryFile();

    static void parseAndCheckOptions(
        const std::vector<String> & arguments, const ProgramOptionsDescription & options_description, CommandLineOptions & options);

    void printEntryHelpMessage() const;
    void printAvailableCommandsHelpMessage() const;
    void printCommandHelpMessage(String command_name) const;
    void printCommandHelpMessage(CommandPtr command) const;
    String getCommandLineWithAliases(CommandPtr command) const;

    std::vector<String> getCompletions(const String & prefix) const;

    std::vector<String> getEmptyCompletion(String command_name) const;

    ~DisksApp() override;

private:
    void parseAndCheckOptions(
        ProgramOptionsDescription & options_description,
        boost::program_options::positional_options_description & positional_options_description,
        std::vector<String> & arguments);
    void runInteractive();
    void runInteractiveReplxx();
    void runInteractiveTestMode();

    String getDefaultConfigFileName();

    std::vector<String> getCommandsToComplete(const String & command_prefix) const;

    // Fields responsible for the REPL work
    String history_file;
    LineReader::Suggest suggest;
    static LineReader::Patterns query_extenders;
    static LineReader::Patterns query_delimiters;
    static String word_break_characters;

    // General command line arguments parsing fields

protected:
    ContextMutablePtr global_context;
    SharedContextHolder shared_context;

    String command_name;
    std::vector<String> command_arguments;

    std::unordered_set<String> supported_commands;
    ContextMutablePtr global_context;
    ProgramOptionsDescription options_description;
    CommandLineOptions options;
    std::unordered_map<String, CommandPtr> command_descriptions;

    po::variables_map options;
};
    std::optional<String> query;

    const std::unordered_map<String, String> aliases
        = {{"cp", "copy"},
           {"mv", "move"},
           {"ls", "list"},
           {"list_disks", "list-disks"},
           {"ln", "link"},
           {"rm", "remove"},
           {"cat", "read"},
           {"r", "read"},
           {"w", "write"},
           {"create", "touch"},
           {"delete", "remove"},
           {"ls-disks", "list-disks"},
           {"ls_disks", "list-disks"},
           {"packed_io", "packed-io"},
           {"change-dir", "cd"},
           {"change_dir", "cd"},
           {"switch_disk", "switch-disk"},
           {"current", "current_disk_with_path"},
           {"current_disk", "current_disk_with_path"},
           {"current_path", "current_disk_with_path"},
           {"cur", "current_disk_with_path"}};

    std::set<String> multidisk_commands = {"copy", "packed-io", "switch-disk", "cd"};

    std::unique_ptr<DisksClient> client{};
};
}
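The alias table above is consulted before command dispatch; a hedged sketch of the lookup it implies (the helper name is hypothetical):

    #include <string>
    #include <unordered_map>

    /// Resolve a user-typed token to its canonical command name; unknown tokens pass through unchanged.
    std::string resolveCommandAlias(
        const std::unordered_map<std::string, std::string> & aliases, const std::string & token)
    {
        auto it = aliases.find(token);
        return it == aliases.end() ? token : it->second;
    }

    /// resolveCommandAlias(aliases, "ls")   -> "list"
    /// resolveCommandAlias(aliases, "cur")  -> "current_disk_with_path"
    /// resolveCommandAlias(aliases, "list") -> "list" (already canonical)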
263 programs/disks/DisksClient.cpp Normal file
@ -0,0 +1,263 @@
#include "DisksClient.h"
|
||||
#include <Client/ClientBase.h>
|
||||
#include <Client/ReplxxLineReader.h>
|
||||
#include <Disks/registerDisks.h>
|
||||
#include <Common/Config/ConfigProcessor.h>
|
||||
|
||||
#include <Formats/registerFormats.h>
|
||||
|
||||
namespace ErrorCodes
|
||||
{
|
||||
extern const int BAD_ARGUMENTS;
|
||||
extern const int LOGICAL_ERROR;
|
||||
};
|
||||
|
||||
namespace DB
|
||||
{
|
||||
DiskWithPath::DiskWithPath(DiskPtr disk_, std::optional<String> path_) : disk(disk_)
|
||||
{
|
||||
if (path_.has_value())
|
||||
{
|
||||
if (!fs::path{path_.value()}.is_absolute())
|
||||
{
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Initializing path {} is not absolute", path_.value());
|
||||
}
|
||||
path = path_.value();
|
||||
}
|
||||
else
|
||||
{
|
||||
path = String{"/"};
|
||||
}
|
||||
|
||||
String relative_path = normalizePathAndGetAsRelative(path);
|
||||
if (disk->isDirectory(relative_path) || (relative_path.empty() && (disk->isDirectory("/"))))
|
||||
{
|
||||
return;
|
||||
}
|
||||
throw Exception(
|
||||
ErrorCodes::BAD_ARGUMENTS,
|
||||
"Initializing path {} (normalized path: {}) at disk {} is not a directory",
|
||||
path,
|
||||
relative_path,
|
||||
disk->getName());
|
||||
}
|
||||
|
||||
std::vector<String> DiskWithPath::listAllFilesByPath(const String & any_path) const
|
||||
{
|
||||
if (isDirectory(any_path))
|
||||
{
|
||||
std::vector<String> file_names;
|
||||
disk->listFiles(getRelativeFromRoot(any_path), file_names);
|
||||
return file_names;
|
||||
}
|
||||
else
|
||||
{
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
std::vector<String> DiskWithPath::getAllFilesByPattern(const String & pattern) const
|
||||
{
|
||||
auto [path_before, path_after] = [&]() -> std::pair<String, String>
|
||||
{
|
||||
auto slash_pos = pattern.find_last_of('/');
|
||||
if (slash_pos >= pattern.size())
|
||||
{
|
||||
return {"", pattern};
|
||||
}
|
||||
else
|
||||
{
|
||||
return {pattern.substr(0, slash_pos + 1), pattern.substr(slash_pos + 1, pattern.size() - slash_pos - 1)};
|
||||
}
|
||||
}();
|
||||
|
||||
if (!isDirectory(path_before))
|
||||
{
|
||||
return {};
|
||||
}
|
||||
else
|
||||
{
|
||||
std::vector<String> file_names = listAllFilesByPath(path_before);
|
||||
|
||||
std::vector<String> answer;
|
||||
|
||||
for (const auto & file_name : file_names)
|
||||
{
|
||||
if (file_name.starts_with(path_after))
|
||||
{
|
||||
String file_pattern = path_before + file_name;
|
||||
if (isDirectory(file_pattern))
|
||||
{
|
||||
file_pattern = file_pattern + "/";
|
||||
}
|
||||
answer.push_back(file_pattern);
|
||||
}
|
||||
}
|
||||
return answer;
|
||||
}
|
||||
};
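getAllFilesByPattern() splits the pattern at its last '/' into a directory part and a name prefix. A standalone sketch of that split (behavior inferred from the code above; the helper name is hypothetical):

    #include <iostream>
    #include <string>
    #include <utility>

    /// "dir/sub/pre" -> {"dir/sub/", "pre"}; a pattern without '/' -> {"", pattern}.
    std::pair<std::string, std::string> splitPattern(const std::string & pattern)
    {
        auto slash_pos = pattern.find_last_of('/');
        if (slash_pos == std::string::npos)
            return {"", pattern};
        return {pattern.substr(0, slash_pos + 1), pattern.substr(slash_pos + 1)};
    }

    int main()
    {
        auto [dir, stem] = splitPattern("data/parts/202");
        std::cout << dir << " | " << stem << '\n'; /// prints "data/parts/ | 202"
    }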
void DiskWithPath::setPath(const String & any_path)
{
    if (isDirectory(any_path))
    {
        path = getAbsolutePath(any_path);
    }
    else
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} at disk {} is not a directory", any_path, disk->getName());
    }
}

String DiskWithPath::validatePathAndGetAsRelative(const String & path)
{
    String lexically_normal_path = fs::path(path).lexically_normal();
    if (lexically_normal_path.find("..") != std::string::npos)
        throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Path {} is not normalized", path);

    /// If the path is absolute, we keep it as relative inside the disk, so the disk looks like
    /// an ordinary filesystem with a root.
    if (fs::path(lexically_normal_path).is_absolute())
        return lexically_normal_path.substr(1);

    return lexically_normal_path;
}

String DiskWithPath::normalizePathAndGetAsRelative(const String & messyPath)
{
    std::filesystem::path path(messyPath);
    std::filesystem::path canonical_path = std::filesystem::weakly_canonical(path);
    String npath = canonical_path.make_preferred().string();
    return validatePathAndGetAsRelative(npath);
}

String DiskWithPath::normalizePath(const String & path)
{
    std::filesystem::path canonical_path = std::filesystem::weakly_canonical(path);
    return canonical_path.make_preferred().string();
}
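Both normalizers above lean on std::filesystem. A short sketch of the difference that matters here: lexically_normal() is purely textual, while weakly_canonical() (used above) also resolves symlinks for whatever prefix of the path exists on the host filesystem:

    #include <filesystem>
    #include <iostream>

    namespace fs = std::filesystem;

    int main()
    {
        /// Purely lexical cleanup: collapses "." and folds ".." into the parent component.
        std::cout << fs::path("/data/./parts/../backup").lexically_normal().string() << '\n';
        /// -> "/data/backup"

        /// weakly_canonical() does the same folding but additionally resolves symlinks
        /// for the existing part of the path, so its output can depend on the host.
        std::cout << fs::weakly_canonical("/tmp/./x/..").string() << '\n';
    }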
DisksClient::DisksClient(std::vector<std::pair<DiskPtr, std::optional<String>>> && disks_with_paths, std::optional<String> begin_disk)
{
    if (disks_with_paths.empty())
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "Initializing array of disks is empty");
    }
    if (!begin_disk.has_value())
    {
        begin_disk = disks_with_paths[0].first->getName();
    }
    bool has_begin_disk = false;
    for (auto & [disk, path] : disks_with_paths)
    {
        addDisk(disk, path);
        if (disk->getName() == begin_disk.value())
        {
            has_begin_disk = true;
        }
    }
    if (!has_begin_disk)
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no begin_disk '{}' in initializing array", begin_disk.value());
    }
    current_disk = std::move(begin_disk.value());
}

const DiskWithPath & DisksClient::getDiskWithPath(const String & disk) const
{
    try
    {
        return disks.at(disk);
    }
    catch (...)
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "The disk '{}' is unknown", disk);
    }
}

DiskWithPath & DisksClient::getDiskWithPath(const String & disk)
{
    try
    {
        return disks.at(disk);
    }
    catch (...)
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "The disk '{}' is unknown", disk);
    }
}

const DiskWithPath & DisksClient::getCurrentDiskWithPath() const
{
    try
    {
        return disks.at(current_disk);
    }
    catch (...)
    {
        throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no current disk in client");
    }
}

DiskWithPath & DisksClient::getCurrentDiskWithPath()
{
    try
    {
        return disks.at(current_disk);
    }
    catch (...)
    {
        throw Exception(ErrorCodes::LOGICAL_ERROR, "There is no current disk in client");
    }
}

void DisksClient::switchToDisk(const String & disk_, const std::optional<String> & path_)
{
    if (disks.contains(disk_))
    {
        if (path_.has_value())
        {
            disks.at(disk_).setPath(path_.value());
        }
        current_disk = disk_;
    }
    else
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "The disk '{}' is unknown", disk_);
    }
}

std::vector<String> DisksClient::getAllDiskNames() const
{
    std::vector<String> answer{};
    answer.reserve(disks.size());
    for (const auto & [disk_name, _] : disks)
    {
        answer.push_back(disk_name);
    }
    return answer;
}

std::vector<String> DisksClient::getAllFilesByPatternFromAllDisks(const String & pattern) const
{
    std::vector<String> answer{};
    for (const auto & [_, disk] : disks)
    {
        for (auto & word : disk.getAllFilesByPattern(pattern))
        {
            answer.push_back(word);
        }
    }
    return answer;
}

void DisksClient::addDisk(DiskPtr disk_, const std::optional<String> & path_)
{
    String disk_name = disk_->getName();
    if (disks.contains(disk_->getName()))
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "The disk '{}' already exists", disk_name);
    }
    disks.emplace(disk_name, DiskWithPath{disk_, path_});
}
}
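DisksClient is essentially a keyed registry with a current selection. A self-contained model of that shape (stub Entry type, names hypothetical), useful for seeing the invariants: unique names, and switchTo() validating before mutating:

    #include <map>
    #include <stdexcept>
    #include <string>

    struct Entry { std::string root = "/"; }; /// stand-in for DiskWithPath

    class MiniClient
    {
    public:
        void add(const std::string & name, Entry e)
        {
            if (!entries.emplace(name, std::move(e)).second)
                throw std::runtime_error("disk '" + name + "' already exists");
        }
        void switchTo(const std::string & name)
        {
            if (!entries.contains(name))
                throw std::runtime_error("disk '" + name + "' is unknown");
            current = name; /// only set after validation, as in switchToDisk() above
        }
        Entry & currentEntry() { return entries.at(current); }

    private:
        std::string current;
        std::map<std::string, Entry> entries;
    };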
89 programs/disks/DisksClient.h Normal file
@ -0,0 +1,89 @@
#pragma once

#include <filesystem>
#include <optional>
#include <string>
#include <unordered_map>
#include <vector>
#include <Client/ReplxxLineReader.h>
#include <Loggers/Loggers.h>
#include "Disks/IDisk.h"

#include <Interpreters/Context.h>
#include <boost/program_options/options_description.hpp>
#include <boost/program_options/variables_map.hpp>

namespace fs = std::filesystem;

namespace DB
{

std::vector<String> split(const String & text, const String & delimiters);

using ProgramOptionsDescription = boost::program_options::options_description;
using CommandLineOptions = boost::program_options::variables_map;

class DiskWithPath
{
public:
    explicit DiskWithPath(DiskPtr disk_, std::optional<String> path_ = std::nullopt);

    String getAbsolutePath(const String & any_path) const { return normalizePath(fs::path(path) / any_path); }

    String getCurrentPath() const { return path; }

    bool isDirectory(const String & any_path) const
    {
        return disk->isDirectory(getRelativeFromRoot(any_path)) || (getRelativeFromRoot(any_path).empty() && (disk->isDirectory("/")));
    }

    std::vector<String> listAllFilesByPath(const String & any_path) const;

    std::vector<String> getAllFilesByPattern(const String & pattern) const;

    DiskPtr getDisk() const { return disk; }

    void setPath(const String & any_path);

    String getRelativeFromRoot(const String & any_path) const { return normalizePathAndGetAsRelative(getAbsolutePath(any_path)); }

private:
    static String validatePathAndGetAsRelative(const String & path);
    static std::string normalizePathAndGetAsRelative(const std::string & messyPath);
    static std::string normalizePath(const std::string & messyPath);

    const DiskPtr disk;
    String path;
};

class DisksClient
{
public:
    explicit DisksClient(std::vector<std::pair<DiskPtr, std::optional<String>>> && disks_with_paths, std::optional<String> begin_disk);

    const DiskWithPath & getDiskWithPath(const String & disk) const;

    DiskWithPath & getDiskWithPath(const String & disk);

    const DiskWithPath & getCurrentDiskWithPath() const;

    DiskWithPath & getCurrentDiskWithPath();

    DiskPtr getCurrentDisk() const { return getCurrentDiskWithPath().getDisk(); }

    DiskPtr getDisk(const String & disk) const { return getDiskWithPath(disk).getDisk(); }

    void switchToDisk(const String & disk_, const std::optional<String> & path_);

    std::vector<String> getAllDiskNames() const;

    std::vector<String> getAllFilesByPatternFromAllDisks(const String & pattern) const;

private:
    void addDisk(DiskPtr disk_, const std::optional<String> & path_);

    String current_disk;
    std::unordered_map<String, DiskWithPath> disks;
};
}
@ -1,5 +1,5 @@
#include "ICommand.h"
#include <iostream>
#include "DisksClient.h"

namespace DB
@ -10,43 +10,42 @@ namespace ErrorCodes
extern const int BAD_ARGUMENTS;
}

void ICommand::printHelpMessage() const
CommandLineOptions ICommand::processCommandLineArguments(const Strings & commands)
{
    std::cout << "Command: " << command_name << '\n';
    std::cout << "Description: " << description << '\n';
    std::cout << "Usage: " << usage << '\n';
    CommandLineOptions options;
    auto parser = po::command_line_parser(commands);
    parser.options(options_description).positional(positional_options_description);

    if (command_option_description)
    po::parsed_options parsed = parser.run();
    po::store(parsed, options);

    return options;
}

void ICommand::execute(const Strings & commands, DisksClient & client)
{
    try
    {
        auto options = *command_option_description;
        if (!options.options().empty())
            std::cout << options << '\n';
        processCommandLineArguments(commands);
    }
    catch (std::exception & exc)
    {
        throw Exception(ErrorCodes::BAD_ARGUMENTS, "{}", exc.what());
    }
    executeImpl(processCommandLineArguments(commands), client);
}

DiskWithPath & ICommand::getDiskWithPath(DisksClient & client, const CommandLineOptions & options, const String & name)
{
    auto disk_name = getValueFromCommandLineOptionsWithOptional<String>(options, name);
    if (disk_name.has_value())
    {
        return client.getDiskWithPath(disk_name.value());
    }
    else
    {
        return client.getCurrentDiskWithPath();
    }
}

void ICommand::addOptions(ProgramOptionsDescription & options_description)
{
    if (!command_option_description || command_option_description->options().empty())
        return;

    options_description.add(*command_option_description);
}

String ICommand::validatePathAndGetAsRelative(const String & path)
{
    /// If the path contains non-normalized components like '.', we normalize them. If the resulting
    /// normalized path still contains '..' it can be dangerous, so such paths are disallowed. Also, since
    /// clickhouse-disks is not an interactive program (it doesn't track your current path), it's OK to disallow '..' paths.
    String lexically_normal_path = fs::path(path).lexically_normal();
    if (lexically_normal_path.find("..") != std::string::npos)
        throw DB::Exception(DB::ErrorCodes::BAD_ARGUMENTS, "Path {} is not normalized", path);

    /// If the path is absolute, we keep it as relative inside the disk, so the disk looks like
    /// an ordinary filesystem with a root.
    if (fs::path(lexically_normal_path).is_absolute())
        return lexically_normal_path.substr(1);

    return lexically_normal_path;
}

}
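With the new interface, a concrete command only declares its metadata and implements executeImpl(). A hypothetical minimal command following that pattern (not part of this commit):

    #include <iostream>
    #include "ICommand.h"

    namespace DB
    {

    /// Hypothetical "pwd"-style command: prints the current disk and path.
    class CommandPwd final : public ICommand
    {
    public:
        CommandPwd()
        {
            command_name = "pwd";
            description = "Print the current disk and path";
        }

        void executeImpl(const CommandLineOptions & /*options*/, DisksClient & client) override
        {
            const auto & disk = client.getCurrentDiskWithPath();
            std::cout << disk.getDisk()->getName() << ":" << disk.getCurrentPath() << '\n';
        }
    };

    }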
@ -1,66 +1,146 @@
#pragma once

#include <Disks/IDisk.h>
#include <optional>
#include <Disks/DiskSelector.h>
#include <Disks/IDisk.h>

#include <boost/any/bad_any_cast.hpp>
#include <boost/program_options.hpp>

#include <Common/Config/ConfigProcessor.h>
#include <Poco/Util/Application.h>
#include "Common/Exception.h"
#include <Common/Config/ConfigProcessor.h>

#include <memory>
#include <boost/program_options/positional_options.hpp>

#include "DisksApp.h"

#include "DisksClient.h"

#include "ICommand_fwd.h"

namespace DB
{

namespace po = boost::program_options;
using ProgramOptionsDescription = boost::program_options::options_description;
using CommandLineOptions = boost::program_options::variables_map;
using ProgramOptionsDescription = po::options_description;
using PositionalProgramOptionsDescription = po::positional_options_description;
using CommandLineOptions = po::variables_map;

namespace ErrorCodes
{
extern const int BAD_ARGUMENTS;
}

class ICommand
{
public:
    ICommand() = default;
    explicit ICommand() = default;

    virtual ~ICommand() = default;

    virtual void execute(
        const std::vector<String> & command_arguments,
        std::shared_ptr<DiskSelector> & disk_selector,
        Poco::Util::LayeredConfiguration & config) = 0;
    void execute(const Strings & commands, DisksClient & client);

    const std::optional<ProgramOptionsDescription> & getCommandOptions() const { return command_option_description; }
    virtual void executeImpl(const CommandLineOptions & options, DisksClient & client) = 0;

    void addOptions(ProgramOptionsDescription & options_description);

    virtual void processOptions(Poco::Util::LayeredConfiguration & config, po::variables_map & options) const = 0;
    CommandLineOptions processCommandLineArguments(const Strings & commands);

protected:
    void printHelpMessage() const;
    template <typename T>
    static T getValueFromCommandLineOptions(const CommandLineOptions & options, const String & name)
    {
        try
        {
            return options[name].as<T>();
        }
        catch (boost::bad_any_cast &)
        {
            throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Argument '{}' has wrong type and can't be parsed", name);
        }
    }

    template <typename T>
    static T getValueFromCommandLineOptionsThrow(const CommandLineOptions & options, const String & name)
    {
        if (options.count(name))
        {
            return getValueFromCommandLineOptions<T>(options, name);
        }
        else
        {
            throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Mandatory argument '{}' is missing", name);
        }
    }

    template <typename T>
    static T getValueFromCommandLineOptionsWithDefault(const CommandLineOptions & options, const String & name, const T & default_value)
    {
        if (options.count(name))
        {
            return getValueFromCommandLineOptions<T>(options, name);
        }
        else
        {
            return default_value;
        }
    }

    template <typename T>
    static std::optional<T> getValueFromCommandLineOptionsWithOptional(const CommandLineOptions & options, const String & name)
    {
        if (options.count(name))
        {
            return std::optional{getValueFromCommandLineOptions<T>(options, name)};
        }
        else
        {
            return std::nullopt;
        }
    }

    DiskWithPath & getDiskWithPath(DisksClient & client, const CommandLineOptions & options, const String & name);

    String getTargetLocation(const String & path_from, DiskWithPath & disk_to, const String & path_to)
    {
        if (!disk_to.getDisk()->isDirectory(path_to))
        {
            return path_to;
        }
        String copied_path_from = path_from;
        if (copied_path_from.ends_with('/'))
        {
            copied_path_from.pop_back();
        }
        String plain_filename = fs::path(copied_path_from).filename();

        return fs::path{path_to} / plain_filename;
    }

    static String validatePathAndGetAsRelative(const String & path);

public:
    String command_name;
    String description;
    ProgramOptionsDescription options_description;

protected:
    std::optional<ProgramOptionsDescription> command_option_description;
    String usage;
    po::positional_options_description positional_options_description;
    PositionalProgramOptionsDescription positional_options_description;
};

using CommandPtr = std::unique_ptr<ICommand>;

}

DB::CommandPtr makeCommandCopy();
DB::CommandPtr makeCommandLink();
DB::CommandPtr makeCommandList();
DB::CommandPtr makeCommandListDisks();
DB::CommandPtr makeCommandList();
DB::CommandPtr makeCommandChangeDirectory();
DB::CommandPtr makeCommandLink();
DB::CommandPtr makeCommandMove();
DB::CommandPtr makeCommandRead();
DB::CommandPtr makeCommandRemove();
DB::CommandPtr makeCommandWrite();
DB::CommandPtr makeCommandMkDir();
DB::CommandPtr makeCommandSwitchDisk();
DB::CommandPtr makeCommandGetCurrentDiskAndPath();
DB::CommandPtr makeCommandHelp(const DisksApp & disks_app);
DB::CommandPtr makeCommandTouch();
#ifdef CLICKHOUSE_CLOUD
DB::CommandPtr makeCommandPackedIO();
#endif
}
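getTargetLocation() above implements the usual cp semantics: copying into an existing directory appends the source file name, otherwise the destination is taken literally. A standalone illustration (the free function is hypothetical; the directory check is reduced to a flag):

    #include <filesystem>
    #include <iostream>
    #include <string>

    namespace fs = std::filesystem;

    std::string targetLocation(const std::string & from, bool dest_is_dir, const std::string & to)
    {
        if (!dest_is_dir)
            return to;
        std::string trimmed = from;
        if (trimmed.ends_with('/')) /// "a/b/dir/" and "a/b/dir" name the same source
            trimmed.pop_back();
        return fs::path(to) / fs::path(trimmed).filename();
    }

    int main()
    {
        std::cout << targetLocation("a/b/file.txt", true, "dst") << '\n';      /// dst/file.txt
        std::cout << targetLocation("a/b/dir/", true, "dst") << '\n';          /// dst/dir
        std::cout << targetLocation("a/b/file.txt", false, "renamed") << '\n'; /// renamed
    }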
10 programs/disks/ICommand_fwd.h Normal file
@ -0,0 +1,10 @@
#pragma once

#include <memory>

namespace DB
{
class ICommand;

using CommandPtr = std::shared_ptr<ICommand>;
}
@ -27,6 +27,8 @@
#include <sys/stat.h>
#include <pwd.h>

#include <Common/Jemalloc.h>

#include <Interpreters/Context.h>

#include <Coordination/FourLetterCommand.h>
@ -262,11 +264,43 @@ HTTPContextPtr httpContext()
    return std::make_shared<KeeperHTTPContext>(Context::getGlobalContextInstance());
}

String getKeeperPath(Poco::Util::LayeredConfiguration & config)
{
    String path;
    if (config.has("keeper_server.storage_path"))
    {
        path = config.getString("keeper_server.storage_path");
    }
    else if (config.has("keeper_server.log_storage_path"))
    {
        path = std::filesystem::path(config.getString("keeper_server.log_storage_path")).parent_path();
    }
    else if (config.has("keeper_server.snapshot_storage_path"))
    {
        path = std::filesystem::path(config.getString("keeper_server.snapshot_storage_path")).parent_path();
    }
    else if (std::filesystem::is_directory(std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination"))
    {
        throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
            "By default 'keeper_server.storage_path' could be assigned to {}, but the directory {} already exists. Please specify 'keeper_server.storage_path' in the keeper configuration explicitly",
            KEEPER_DEFAULT_PATH, String{std::filesystem::path{config.getString("path", DBMS_DEFAULT_PATH)} / "coordination"});
    }
    else
    {
        path = KEEPER_DEFAULT_PATH;
    }
    return path;
}

}
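getKeeperPath() centralizes a precedence that was previously inlined in Keeper::main() (see the hunk below). A standalone model of that precedence, with the config lookup abstracted to a callback and the "directory already exists" error branch omitted for brevity (names hypothetical):

    #include <filesystem>
    #include <functional>
    #include <optional>
    #include <string>

    std::string resolveKeeperPath(
        const std::function<std::optional<std::string>(const std::string &)> & get, /// config lookup
        const std::string & default_path)
    {
        if (auto v = get("keeper_server.storage_path"))
            return *v;                                      /// explicit storage path wins
        if (auto v = get("keeper_server.log_storage_path"))
            return std::filesystem::path(*v).parent_path(); /// else: parent of the log dir
        if (auto v = get("keeper_server.snapshot_storage_path"))
            return std::filesystem::path(*v).parent_path(); /// else: parent of the snapshot dir
        return default_path;                                /// else: built-in default
    }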
int Keeper::main(const std::vector<std::string> & /*args*/)
try
{
#if USE_JEMALLOC
    setJemallocBackgroundThreads(true);
#endif
    Poco::Logger * log = &logger();

    UseSSL use_ssl;
@ -311,31 +345,7 @@ try

    updateMemorySoftLimitInConfig(config());

    std::string path;

    if (config().has("keeper_server.storage_path"))
    {
        path = config().getString("keeper_server.storage_path");
    }
    else if (config().has("keeper_server.log_storage_path"))
    {
        path = std::filesystem::path(config().getString("keeper_server.log_storage_path")).parent_path();
    }
    else if (config().has("keeper_server.snapshot_storage_path"))
    {
        path = std::filesystem::path(config().getString("keeper_server.snapshot_storage_path")).parent_path();
    }
    else if (std::filesystem::is_directory(std::filesystem::path{config().getString("path", DBMS_DEFAULT_PATH)} / "coordination"))
    {
        throw Exception(ErrorCodes::NO_ELEMENTS_IN_CONFIG,
            "By default 'keeper_server.storage_path' could be assigned to {}, but the directory {} already exists. Please specify 'keeper_server.storage_path' in the keeper configuration explicitly",
            KEEPER_DEFAULT_PATH, String{std::filesystem::path{config().getString("path", DBMS_DEFAULT_PATH)} / "coordination"});
    }
    else
    {
        path = KEEPER_DEFAULT_PATH;
    }

    std::string path = getKeeperPath(config());
    std::filesystem::create_directories(path);

    /// Check that the process user id matches the owner of the data.
@ -549,7 +559,7 @@ try
    auto main_config_reloader = std::make_unique<ConfigReloader>(
        config_path,
        extra_paths,
        config().getString("path", KEEPER_DEFAULT_PATH),
        getKeeperPath(config()),
        std::move(unused_cache),
        unused_event,
        [&](ConfigurationPtr config, bool /* initial_loading */)
9 programs/keeper/conf.d/local.yaml Normal file
@ -0,0 +1,9 @@
logger:
    log:
        "@remove": remove
    errorlog:
        "@remove": remove
    console: 1
keeper_server:
    log_storage_path: ./logs
    snapshot_storage_path: ./snapshots
@ -11,6 +11,7 @@
#include <Poco/Util/HelpFormatter.h>
#include <Poco/Environment.h>
#include <Poco/Config.h>
#include <Common/Jemalloc.h>
#include <Common/scope_guard_safe.h>
#include <Common/logger_useful.h>
#include <base/phdr_cache.h>
@ -586,6 +587,54 @@ static void sanityChecks(Server & server)
    }
}

void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, ContextMutablePtr context, Poco::Logger * log)
{
    try
    {
        Poco::Util::AbstractConfiguration::Keys keys;
        config.keys("startup_scripts", keys);

        SetResultDetailsFunc callback;
        for (const auto & key : keys)
        {
            std::string full_prefix = "startup_scripts." + key;

            if (config.has(full_prefix + ".condition"))
            {
                auto condition = config.getString(full_prefix + ".condition");
                auto condition_read_buffer = ReadBufferFromString(condition);
                auto condition_write_buffer = WriteBufferFromOwnString();

                LOG_DEBUG(log, "Checking startup query condition `{}`", condition);
                executeQuery(condition_read_buffer, condition_write_buffer, true, context, callback, QueryFlags{ .internal = true }, std::nullopt, {});

                auto result = condition_write_buffer.str();

                if (result != "1\n" && result != "true\n")
                {
                    if (result != "0\n" && result != "false\n")
                        context->addWarningMessage(fmt::format("The condition query returned `{}`, which can't be interpreted as a boolean (`0`, `false`, `1`, `true`). Will skip this query.", result));

                    continue;
                }

                LOG_DEBUG(log, "Condition is true, will execute the query next");
            }

            auto query = config.getString(full_prefix + ".query");
            auto read_buffer = ReadBufferFromString(query);
            auto write_buffer = WriteBufferFromOwnString();

            LOG_DEBUG(log, "Executing query `{}`", query);
            executeQuery(read_buffer, write_buffer, true, context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
        }
    }
    catch (...)
    {
        tryLogCurrentException(log, "Failed to parse startup scripts file");
    }
}
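The condition handling above accepts only four exact outputs. A distilled sketch of that decision (enum and helper are illustrative, not from this commit):

    #include <string>

    /// A condition query's output must be exactly "1\n" or "true\n" to run the script;
    /// "0\n"/"false\n" skips it silently; anything else skips it with a warning.
    enum class ConditionVerdict { Run, Skip, SkipWithWarning };

    ConditionVerdict interpretCondition(const std::string & result)
    {
        if (result == "1\n" || result == "true\n")
            return ConditionVerdict::Run;
        if (result == "0\n" || result == "false\n")
            return ConditionVerdict::Skip;
        return ConditionVerdict::SkipWithWarning;
    }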
static void initializeAzureSDKLogger(
    [[ maybe_unused ]] const ServerSettings & server_settings,
    [[ maybe_unused ]] int server_logs_level)
@ -625,9 +674,35 @@ static void initializeAzureSDKLogger(
#endif
}

#if defined(SANITIZER)
static std::vector<String> getSanitizerNames()
{
    std::vector<String> names;

#if defined(ADDRESS_SANITIZER)
    names.push_back("address");
#endif
#if defined(THREAD_SANITIZER)
    names.push_back("thread");
#endif
#if defined(MEMORY_SANITIZER)
    names.push_back("memory");
#endif
#if defined(UNDEFINED_BEHAVIOR_SANITIZER)
    names.push_back("undefined behavior");
#endif

    return names;
}
#endif
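The warning text built from getSanitizerNames() has three shapes. A compact sketch of that formatting (helper name hypothetical):

    #include <string>
    #include <vector>
    #include <fmt/format.h>
    #include <fmt/ranges.h> /// for fmt::join

    std::string sanitizerMessage(const std::vector<std::string> & names)
    {
        if (names.empty())
            return "sanitizer";                                /// generic fallback
        if (names.size() == 1)
            return fmt::format("{} sanitizer", names.front()); /// e.g. "address sanitizer"
        return fmt::format("sanitizers ({})", fmt::join(names, ", "));
    }

    /// sanitizerMessage({"address", "undefined behavior"})
    ///   -> "sanitizers (address, undefined behavior)"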
int Server::main(const std::vector<std::string> & /*args*/)
try
{
#if USE_JEMALLOC
    setJemallocBackgroundThreads(true);
#endif

    Stopwatch startup_watch;

    Poco::Logger * log = &logger();
@ -711,7 +786,17 @@ try
        global_context->addWarningMessage("ThreadFuzzer is enabled. Application will run slowly and unstable.");

#if defined(SANITIZER)
    global_context->addWarningMessage("Server was built with sanitizer. It will work slowly.");
    auto sanitizers = getSanitizerNames();

    String log_message;
    if (sanitizers.empty())
        log_message = "sanitizer";
    else if (sanitizers.size() == 1)
        log_message = fmt::format("{} sanitizer", sanitizers.front());
    else
        log_message = fmt::format("sanitizers ({})", fmt::join(sanitizers, ", "));

    global_context->addWarningMessage(fmt::format("Server was built with {}. It will work slowly.", log_message));
#endif

#if defined(SANITIZE_COVERAGE) || WITH_COVERAGE
@ -1953,6 +2038,11 @@ try
    /// otherwise there is a race condition between the system database initialization
    /// and creation of new tables in the database.
    waitLoad(TablesLoaderForegroundPoolId, system_startup_tasks);

    /// Startup scripts can depend on the system log tables.
    if (config().has("startup_scripts") && !server_settings.prepare_system_log_tables_on_startup.changed)
        global_context->setServerSetting("prepare_system_log_tables_on_startup", true);

    /// After attaching system databases we can initialize system log.
    global_context->initializeSystemLogs();
    global_context->setSystemZooKeeperLogAfterInitializationIfNeeded();
@ -2101,6 +2191,9 @@ try
    load_metadata_tasks.clear();
    load_metadata_tasks.shrink_to_fit();

    if (config().has("startup_scripts"))
        loadStartupScripts(config(), global_context, log);

    {
        std::lock_guard lock(servers_lock);
        for (auto & server : servers)
@ -29,48 +29,49 @@ namespace ErrorCodes
}

BackupReaderAzureBlobStorage::BackupReaderAzureBlobStorage(
    const StorageAzureConfiguration & configuration_,
    const AzureBlobStorage::ConnectionParams & connection_params_,
    const String & blob_path_,
    bool allow_azure_native_copy,
    const ReadSettings & read_settings_,
    const WriteSettings & write_settings_,
    const ContextPtr & context_)
    : BackupReaderDefault(read_settings_, write_settings_, getLogger("BackupReaderAzureBlobStorage"))
    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, configuration_.getConnectionURL().toString(), false, false}
    , configuration(configuration_)
    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, connection_params_.getConnectionURL(), false, false}
    , connection_params(connection_params_)
    , blob_path(blob_path_)
{
    auto client_ptr = configuration.createClient(/* is_readonly */false, /* attempt_to_create_container */true);
    client_ptr->SetClickhouseOptions(Azure::Storage::Blobs::ClickhouseClientOptions{.IsClientForDisk=true});
    auto client_ptr = AzureBlobStorage::getContainerClient(connection_params, /*readonly=*/ false);
    auto settings_ptr = AzureBlobStorage::getRequestSettingsForBackup(context_->getSettingsRef(), allow_azure_native_copy);

    object_storage = std::make_unique<AzureObjectStorage>("BackupReaderAzureBlobStorage",
        std::move(client_ptr),
        configuration.createSettings(context_),
        configuration_.container,
        configuration.getConnectionURL().toString());
    object_storage = std::make_unique<AzureObjectStorage>(
        "BackupReaderAzureBlobStorage",
        std::move(client_ptr),
        std::move(settings_ptr),
        connection_params.getContainer(),
        connection_params.getConnectionURL());

    client = object_storage->getAzureBlobStorageClient();
    auto settings_copy = *object_storage->getSettings();
    settings_copy.use_native_copy = allow_azure_native_copy;
    settings = std::make_unique<const AzureObjectStorageSettings>(settings_copy);
    settings = object_storage->getSettings();
}

BackupReaderAzureBlobStorage::~BackupReaderAzureBlobStorage() = default;

bool BackupReaderAzureBlobStorage::fileExists(const String & file_name)
{
    String key = fs::path(configuration.blob_path) / file_name;
    String key = fs::path(blob_path) / file_name;
    return object_storage->exists(StoredObject(key));
}

UInt64 BackupReaderAzureBlobStorage::getFileSize(const String & file_name)
{
    String key = fs::path(configuration.blob_path) / file_name;
    String key = fs::path(blob_path) / file_name;
    ObjectMetadata object_metadata = object_storage->getObjectMetadata(key);
    return object_metadata.size_bytes;
}

std::unique_ptr<SeekableReadBuffer> BackupReaderAzureBlobStorage::readFile(const String & file_name)
{
    String key = fs::path(configuration.blob_path) / file_name;
    String key = fs::path(blob_path) / file_name;
    return std::make_unique<ReadBufferFromAzureBlobStorage>(
        client, key, read_settings, settings->max_single_read_retries,
        settings->max_single_download_retries);
@ -85,23 +86,23 @@ void BackupReaderAzureBlobStorage::copyFileToDisk(const String & path_in_backup,
        && destination_data_source_description.is_encrypted == encrypted_in_backup)
    {
        LOG_TRACE(log, "Copying {} from AzureBlobStorage to disk {}", path_in_backup, destination_disk->getName());
        auto write_blob_function = [&](const Strings & blob_path, WriteMode mode, const std::optional<ObjectAttributes> &) -> size_t
        auto write_blob_function = [&](const Strings & dst_blob_path, WriteMode mode, const std::optional<ObjectAttributes> &) -> size_t
        {
            /// Object storage always uses mode `Rewrite` because it simulates append using metadata and different files.
            if (blob_path.size() != 2 || mode != WriteMode::Rewrite)
            if (dst_blob_path.size() != 2 || mode != WriteMode::Rewrite)
                throw Exception(ErrorCodes::LOGICAL_ERROR,
                    "Blob writing function called with unexpected blob_path.size={} or mode={}",
                    blob_path.size(), mode);
                    dst_blob_path.size(), mode);

            copyAzureBlobStorageFile(
                client,
                destination_disk->getObjectStorage()->getAzureBlobStorageClient(),
                configuration.container,
                fs::path(configuration.blob_path) / path_in_backup,
                connection_params.getContainer(),
                fs::path(blob_path) / path_in_backup,
                0,
                file_size,
                /* dest_container */ blob_path[1],
                /* dest_path */ blob_path[0],
                /* dest_container */ dst_blob_path[1],
                /* dest_path */ dst_blob_path[0],
                settings,
                read_settings,
                threadPoolCallbackRunnerUnsafe<void>(getBackupsIOThreadPool().get(), "BackupRDAzure"));
@ -119,28 +120,33 @@ void BackupReaderAzureBlobStorage::copyFileToDisk(const String & path_in_backup,


BackupWriterAzureBlobStorage::BackupWriterAzureBlobStorage(
    const StorageAzureConfiguration & configuration_,
    const AzureBlobStorage::ConnectionParams & connection_params_,
    const String & blob_path_,
    bool allow_azure_native_copy,
    const ReadSettings & read_settings_,
    const WriteSettings & write_settings_,
    const ContextPtr & context_,
    bool attempt_to_create_container)
    : BackupWriterDefault(read_settings_, write_settings_, getLogger("BackupWriterAzureBlobStorage"))
    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, configuration_.getConnectionURL().toString(), false, false}
    , configuration(configuration_)
    , data_source_description{DataSourceType::ObjectStorage, ObjectStorageType::Azure, MetadataStorageType::None, connection_params_.getConnectionURL(), false, false}
    , connection_params(connection_params_)
    , blob_path(blob_path_)
{
    auto client_ptr = configuration.createClient(/* is_readonly */false, attempt_to_create_container);
    client_ptr->SetClickhouseOptions(Azure::Storage::Blobs::ClickhouseClientOptions{.IsClientForDisk=true});
    if (!attempt_to_create_container)
        connection_params.endpoint.container_already_exists = true;

    auto client_ptr = AzureBlobStorage::getContainerClient(connection_params, /*readonly=*/ false);
    auto settings_ptr = AzureBlobStorage::getRequestSettingsForBackup(context_->getSettingsRef(), allow_azure_native_copy);

    object_storage = std::make_unique<AzureObjectStorage>(
        "BackupWriterAzureBlobStorage",
        std::move(client_ptr),
        std::move(settings_ptr),
        connection_params.getContainer(),
        connection_params.getConnectionURL());

    object_storage = std::make_unique<AzureObjectStorage>("BackupWriterAzureBlobStorage",
        std::move(client_ptr),
        configuration.createSettings(context_),
        configuration.container,
        configuration_.getConnectionURL().toString());
    client = object_storage->getAzureBlobStorageClient();
    auto settings_copy = *object_storage->getSettings();
    settings_copy.use_native_copy = allow_azure_native_copy;
    settings = std::make_unique<const AzureObjectStorageSettings>(settings_copy);
    settings = object_storage->getSettings();
}

void BackupWriterAzureBlobStorage::copyFileFromDisk(
@ -159,18 +165,18 @@ void BackupWriterAzureBlobStorage::copyFileFromDisk(
{
    /// getBlobPath() can return more than 3 elements if the file is stored as multiple objects in AzureBlobStorage container.
    /// In this case we can't use the native copy.
    if (auto blob_path = src_disk->getBlobPath(src_path); blob_path.size() == 2)
    if (auto src_blob_path = src_disk->getBlobPath(src_path); src_blob_path.size() == 2)
    {
        LOG_TRACE(log, "Copying file {} from disk {} to AzureBlobStorage", src_path, src_disk->getName());
        copyAzureBlobStorageFile(
            src_disk->getObjectStorage()->getAzureBlobStorageClient(),
            client,
            /* src_container */ blob_path[1],
            /* src_path */ blob_path[0],
            /* src_container */ src_blob_path[1],
            /* src_path */ src_blob_path[0],
            start_pos,
            length,
            configuration.container,
            fs::path(configuration.blob_path) / path_in_backup,
            connection_params.getContainer(),
            fs::path(blob_path) / path_in_backup,
            settings,
            read_settings,
            threadPoolCallbackRunnerUnsafe<void>(getBackupsIOThreadPool().get(), "BackupWRAzure"));
@ -188,11 +194,11 @@ void BackupWriterAzureBlobStorage::copyFile(const String & destination, const St
    copyAzureBlobStorageFile(
        client,
        client,
        configuration.container,
        fs::path(configuration.blob_path)/ source,
        connection_params.getContainer(),
        fs::path(blob_path)/ source,
        0,
        size,
        /* dest_container */ configuration.container,
        /* dest_container */ connection_params.getContainer(),
        /* dest_path */ destination,
        settings,
        read_settings,
@ -206,22 +212,28 @@ void BackupWriterAzureBlobStorage::copyDataToFile(
    UInt64 length)
{
    copyDataToAzureBlobStorageFile(
        create_read_buffer, start_pos, length, client, configuration.container,
        fs::path(configuration.blob_path) / path_in_backup, settings,
        threadPoolCallbackRunnerUnsafe<void>(getBackupsIOThreadPool().get(), "BackupWRAzure"));
        create_read_buffer,
        start_pos,
        length,
        client,
        connection_params.getContainer(),
        fs::path(blob_path) / path_in_backup,
        settings,
        threadPoolCallbackRunnerUnsafe<void>(getBackupsIOThreadPool().get(),
        "BackupWRAzure"));
}

BackupWriterAzureBlobStorage::~BackupWriterAzureBlobStorage() = default;

bool BackupWriterAzureBlobStorage::fileExists(const String & file_name)
{
    String key = fs::path(configuration.blob_path) / file_name;
    String key = fs::path(blob_path) / file_name;
    return object_storage->exists(StoredObject(key));
}

UInt64 BackupWriterAzureBlobStorage::getFileSize(const String & file_name)
{
    String key = fs::path(configuration.blob_path) / file_name;
    String key = fs::path(blob_path) / file_name;
    RelativePathsWithMetadata children;
    object_storage->listObjects(key,children,/*max_keys*/0);
    if (children.empty())
@ -231,7 +243,7 @@ UInt64 BackupWriterAzureBlobStorage::getFileSize(const String & file_name)

std::unique_ptr<ReadBuffer> BackupWriterAzureBlobStorage::readFile(const String & file_name, size_t /*expected_file_size*/)
{
    String key = fs::path(configuration.blob_path) / file_name;
    String key = fs::path(blob_path) / file_name;
    return std::make_unique<ReadBufferFromAzureBlobStorage>(
        client, key, read_settings, settings->max_single_read_retries,
        settings->max_single_download_retries);
@ -239,7 +251,7 @@ std::unique_ptr<ReadBuffer> BackupWriterAzureBlobStorage::readFile(const String

std::unique_ptr<WriteBuffer> BackupWriterAzureBlobStorage::writeFile(const String & file_name)
{
    String key = fs::path(configuration.blob_path) / file_name;
    String key = fs::path(blob_path) / file_name;
    return std::make_unique<WriteBufferFromAzureBlobStorage>(
        client,
        key,
@ -251,7 +263,7 @@ std::unique_ptr<WriteBuffer> BackupWriterAzureBlobStorage::writeFile(const Strin

void BackupWriterAzureBlobStorage::removeFile(const String & file_name)
{
    String key = fs::path(configuration.blob_path) / file_name;
    String key = fs::path(blob_path) / file_name;
    StoredObject object(key);
    object_storage->removeObjectIfExists(object);
}
@ -260,7 +272,7 @@ void BackupWriterAzureBlobStorage::removeFiles(const Strings & file_names)
{
    StoredObjects objects;
    for (const auto & file_name : file_names)
        objects.emplace_back(fs::path(configuration.blob_path) / file_name);
        objects.emplace_back(fs::path(blob_path) / file_name);

    object_storage->removeObjectsIfExist(objects);

@ -270,7 +282,7 @@ void BackupWriterAzureBlobStorage::removeFilesBatch(const Strings & file_names)
{
    StoredObjects objects;
    for (const auto & file_name : file_names)
        objects.emplace_back(fs::path(configuration.blob_path) / file_name);
        objects.emplace_back(fs::path(blob_path) / file_name);

    object_storage->removeObjectsIfExist(objects);
}
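Every key in this file is built as fs::path(blob_path) / file_name. Two properties of that operator are worth keeping in mind; a tiny demonstration:

    #include <filesystem>
    #include <iostream>

    namespace fs = std::filesystem;

    int main()
    {
        /// operator/ inserts exactly one separator between prefix and file name.
        std::cout << (fs::path("backups/2024") / "metadata.json").string() << '\n';
        /// -> "backups/2024/metadata.json"

        /// Caveat: an absolute right-hand side replaces the left-hand side entirely.
        std::cout << (fs::path("backups/2024") / "/abs.bin").string() << '\n';
        /// -> "/abs.bin"
    }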
@ -1,12 +1,10 @@
#pragma once

#include "config.h"

#if USE_AZURE_BLOB_STORAGE
#include <Backups/BackupIO_Default.h>
#include <Disks/DiskType.h>
#include <Interpreters/Context_fwd.h>
#include <Storages/ObjectStorage/Azure/Configuration.h>
#include <Disks/ObjectStorages/AzureBlobStorage/AzureObjectStorage.h>


namespace DB
@ -17,7 +15,8 @@ class BackupReaderAzureBlobStorage : public BackupReaderDefault
{
public:
    BackupReaderAzureBlobStorage(
        const StorageAzureConfiguration & configuration_,
        const AzureBlobStorage::ConnectionParams & connection_params_,
        const String & blob_path_,
        bool allow_azure_native_copy,
        const ReadSettings & read_settings_,
        const WriteSettings & write_settings_,
@ -40,16 +39,18 @@ public:
private:
    const DataSourceDescription data_source_description;
    std::shared_ptr<const Azure::Storage::Blobs::BlobContainerClient> client;
    StorageAzureConfiguration configuration;
    AzureBlobStorage::ConnectionParams connection_params;
    String blob_path;
    std::unique_ptr<AzureObjectStorage> object_storage;
    std::shared_ptr<const AzureObjectStorageSettings> settings;
    std::shared_ptr<const AzureBlobStorage::RequestSettings> settings;
};

class BackupWriterAzureBlobStorage : public BackupWriterDefault
{
public:
    BackupWriterAzureBlobStorage(
        const StorageAzureConfiguration & configuration_,
        const AzureBlobStorage::ConnectionParams & connection_params_,
        const String & blob_path_,
        bool allow_azure_native_copy,
        const ReadSettings & read_settings_,
        const WriteSettings & write_settings_,
@ -87,9 +88,10 @@ private:

    const DataSourceDescription data_source_description;
    std::shared_ptr<const Azure::Storage::Blobs::BlobContainerClient> client;
    StorageAzureConfiguration configuration;
    AzureBlobStorage::ConnectionParams connection_params;
    String blob_path;
    std::unique_ptr<AzureObjectStorage> object_storage;
    std::shared_ptr<const AzureObjectStorageSettings> settings;
    std::shared_ptr<const AzureBlobStorage::RequestSettings> settings;
};

}
@ -5,6 +5,7 @@
|
||||
|
||||
#if USE_AZURE_BLOB_STORAGE
|
||||
#include <Backups/BackupIO_AzureBlobStorage.h>
|
||||
#include <Disks/ObjectStorages/AzureBlobStorage/AzureBlobStorageCommon.h>
|
||||
#include <Backups/BackupImpl.h>
|
||||
#include <IO/Archives/hasRegisteredArchiveFileExtension.h>
|
||||
#include <Interpreters/Context.h>
|
||||
@ -49,7 +50,9 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
|
||||
const String & id_arg = params.backup_info.id_arg;
|
||||
const auto & args = params.backup_info.args;
|
||||
|
||||
StorageAzureConfiguration configuration;
|
||||
String blob_path;
|
||||
AzureBlobStorage::ConnectionParams connection_params;
|
||||
auto request_settings = AzureBlobStorage::getRequestSettings(params.context->getSettingsRef());
|
||||
|
||||
if (!id_arg.empty())
|
||||
{
|
||||
@ -59,55 +62,42 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
|
||||
if (!config.has(config_prefix))
|
||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "There is no collection named `{}` in config", id_arg);
|
||||
|
||||
if (config.has(config_prefix + ".connection_string"))
|
||||
connection_params =
|
||||
{
|
||||
configuration.connection_url = config.getString(config_prefix + ".connection_string");
|
||||
configuration.is_connection_string = true;
|
||||
configuration.container = config.getString(config_prefix + ".container");
|
||||
}
|
||||
else
|
||||
{
|
||||
configuration.connection_url = config.getString(config_prefix + ".storage_account_url");
|
||||
configuration.is_connection_string = false;
|
||||
configuration.container = config.getString(config_prefix + ".container");
|
||||
configuration.account_name = config.getString(config_prefix + ".account_name");
|
||||
configuration.account_key = config.getString(config_prefix + ".account_key");
|
||||
|
||||
if (config.has(config_prefix + ".account_name") && config.has(config_prefix + ".account_key"))
|
||||
{
|
||||
configuration.account_name = config.getString(config_prefix + ".account_name");
|
||||
configuration.account_key = config.getString(config_prefix + ".account_key");
|
||||
}
|
||||
}
|
||||
.endpoint = AzureBlobStorage::processEndpoint(config, config_prefix),
|
||||
.auth_method = AzureBlobStorage::getAuthMethod(config, config_prefix),
|
||||
.client_options = AzureBlobStorage::getClientOptions(*request_settings, /*for_disk=*/ true),
|
||||
};
|
||||
|
||||
if (args.size() > 1)
|
||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
||||
"Backup AzureBlobStorage requires 1 or 2 arguments: named_collection, [filename]");
|
||||
|
||||
if (args.size() == 1)
|
||||
configuration.setPath(args[0].safeGet<String>());
|
||||
|
||||
blob_path = args[0].safeGet<String>();
|
||||
}
|
||||
else
|
||||
{
|
||||
        if (args.size() == 3)
        {
            configuration.connection_url = args[0].safeGet<String>();
            configuration.is_connection_string = !configuration.connection_url.starts_with("http");
            auto connection_url = args[0].safeGet<String>();
            auto container_name = args[1].safeGet<String>();
            blob_path = args[2].safeGet<String>();

            configuration.container = args[1].safeGet<String>();
            configuration.blob_path = args[2].safeGet<String>();
            AzureBlobStorage::processURL(connection_url, container_name, connection_params.endpoint, connection_params.auth_method);
            connection_params.client_options = AzureBlobStorage::getClientOptions(*request_settings, /*for_disk=*/ true);
        }
        else if (args.size() == 5)
        {
            configuration.connection_url = args[0].safeGet<String>();
            configuration.is_connection_string = false;
            connection_params.endpoint.storage_account_url = args[0].safeGet<String>();
            connection_params.endpoint.container_name = args[1].safeGet<String>();
            blob_path = args[2].safeGet<String>();

            configuration.container = args[1].safeGet<String>();
            configuration.blob_path = args[2].safeGet<String>();
            configuration.account_name = args[3].safeGet<String>();
            configuration.account_key = args[4].safeGet<String>();
            auto account_name = args[3].safeGet<String>();
            auto account_key = args[4].safeGet<String>();

            connection_params.auth_method = std::make_shared<Azure::Storage::StorageSharedKeyCredential>(account_name, account_key);
            connection_params.client_options = AzureBlobStorage::getClientOptions(*request_settings, /*for_disk=*/ true);
        }
        else
        {
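Both argument-parsing branches above now produce a single `AzureBlobStorage::ConnectionParams` descriptor instead of filling the flat `StorageAzureConfiguration` fields. Below is a minimal, compilable sketch of what such a descriptor could look like, inferred only from the designated initializers and assignments in this hunk; every type and field here is an assumption, not the actual ClickHouse definition.

```cpp
#include <memory>
#include <string>

namespace AzureBlobStorageSketch
{

// Where to connect: an account URL plus container, matching the
// parsing branches in the hunk above.
struct Endpoint
{
    std::string storage_account_url;  // e.g. "https://acc.blob.core.windows.net"
    std::string container_name;
};

// Placeholder for retry/timeout options built from request settings.
struct ClientOptions
{
    bool for_disk = false;
};

// Stand-in for the credential; the real code stores e.g. an
// Azure::Storage::StorageSharedKeyCredential behind a shared_ptr.
using AuthMethod = std::shared_ptr<void>;

struct ConnectionParams
{
    Endpoint endpoint;
    AuthMethod auth_method;
    ClientOptions client_options;
};

}

int main()
{
    // Mirrors the five-argument branch: explicit account URL and container.
    AzureBlobStorageSketch::ConnectionParams params
    {
        .endpoint = {.storage_account_url = "https://acc.blob.core.windows.net", .container_name = "backups"},
        .auth_method = nullptr,           // shared-key credential in the real code
        .client_options = {.for_disk = true},
    };
    (void)params;
}
```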
@ -117,16 +107,12 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
    }

    BackupImpl::ArchiveParams archive_params;
    if (hasRegisteredArchiveFileExtension(configuration.getPath()))
    if (hasRegisteredArchiveFileExtension(blob_path))
    {
        if (params.is_internal_backup)
            throw Exception(ErrorCodes::SUPPORT_IS_DISABLED, "Using archives with backups on clusters is disabled");

        auto path = configuration.getPath();
        auto filename = removeFileNameFromURL(path);
        configuration.setPath(path);

        archive_params.archive_name = filename;
        archive_params.archive_name = removeFileNameFromURL(blob_path);
        archive_params.compression_method = params.compression_method;
        archive_params.compression_level = params.compression_level;
        archive_params.password = params.password;
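Note how the rewritten branch derives the archive name in one step from `blob_path` instead of round-tripping the path through the configuration object. The sketch below shows the helper's assumed contract, inferred from how the removed code reused `path` afterwards: return the trailing file name and strip it from the in/out argument. The real `removeFileNameFromURL` in ClickHouse's IO utilities may differ.

```cpp
#include <cassert>
#include <string>

// Assumed semantics only: split off the trailing file name, leaving the
// directory part (with its trailing '/') in the in/out argument.
static std::string removeFileNameFromURL(std::string & path)
{
    auto pos = path.find_last_of('/');
    if (pos == std::string::npos)
    {
        std::string filename = path;
        path.clear();
        return filename;
    }
    std::string filename = path.substr(pos + 1);
    path.resize(pos + 1);
    return filename;
}

int main()
{
    std::string blob_path = "backups/2024/meta.zip";
    assert(removeFileNameFromURL(blob_path) == "meta.zip");  // archive name
    assert(blob_path == "backups/2024/");                    // remaining prefix
}
```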
@ -141,7 +127,8 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
    if (params.open_mode == IBackup::OpenMode::READ)
    {
        auto reader = std::make_shared<BackupReaderAzureBlobStorage>(
            configuration,
            connection_params,
            blob_path,
            params.allow_azure_native_copy,
            params.read_settings,
            params.write_settings,
@ -159,7 +146,8 @@ void registerBackupEngineAzureBlobStorage(BackupFactory & factory)
    else
    {
        auto writer = std::make_shared<BackupWriterAzureBlobStorage>(
            configuration,
            connection_params,
            blob_path,
            params.allow_azure_native_copy,
            params.read_settings,
            params.write_settings,
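The reader and writer hunks are symmetric: both classes now receive the connection descriptor and the blob path as separate arguments where one configuration object used to go. Here is a toy, compilable sketch of that dispatch shape, with every ClickHouse type replaced by a stand-in; all names below are assumptions made for illustration.

```cpp
#include <iostream>
#include <memory>
#include <string>

// Stand-ins for the real ClickHouse types referenced in the diff.
struct ConnectionParams {};
enum class OpenMode { READ, WRITE };

struct BackupReaderSketch
{
    BackupReaderSketch(ConnectionParams, const std::string & blob_path)
    {
        std::cout << "reading " << blob_path << '\n';
    }
};

struct BackupWriterSketch
{
    BackupWriterSketch(ConnectionParams, const std::string & blob_path)
    {
        std::cout << "writing " << blob_path << '\n';
    }
};

int main()
{
    ConnectionParams connection_params;
    std::string blob_path = "backups/meta.zip";
    OpenMode open_mode = OpenMode::READ;

    // Mirrors the factory: same arguments, a different I/O class per mode.
    if (open_mode == OpenMode::READ)
        std::make_shared<BackupReaderSketch>(connection_params, blob_path);
    else
        std::make_shared<BackupWriterSketch>(connection_params, blob_path);
}
```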
@ -607,6 +607,10 @@ if (TARGET ch_contrib::usearch)
    dbms_target_link_libraries(PUBLIC ch_contrib::usearch)
endif()

if (TARGET ch_contrib::prometheus_protobufs)
    dbms_target_link_libraries (PUBLIC ch_contrib::prometheus_protobufs)
endif()

if (TARGET ch_rust::skim)
    dbms_target_include_directories(PRIVATE $<TARGET_PROPERTY:ch_rust::skim,INTERFACE_INCLUDE_DIRECTORIES>)
    dbms_target_link_libraries(PUBLIC ch_rust::skim)
@ -6,13 +6,13 @@
#include <Common/ProgressIndication.h>
#include <Common/InterruptListener.h>
#include <Common/ShellCommand.h>
#include <Common/QueryFuzzer.h>
#include <Common/Stopwatch.h>
#include <Common/DNSResolver.h>
#include <Core/ExternalTable.h>
#include <Poco/Util/Application.h>
#include <Interpreters/Context.h>
#include <Client/Suggest.h>
#include <Client/QueryFuzzer.h>
#include <boost/program_options.hpp>
#include <Storages/StorageFile.h>
#include <Storages/SelectQueryInfo.h>
@ -195,6 +195,12 @@ void HedgedConnections::sendQuery(
        modified_settings.parallel_replica_offset = fd_to_replica_location[replica.packet_receiver->getFileDescriptor()].offset;
    }

    /// FIXME: Remove this once `allow_experimental_analyzer` becomes an obsolete setting.
    /// Mark the analyzer setting as explicitly set, so it is effectively applied on the remote server.
    /// In other words, the initiator always controls whether the analyzer is enabled for
    /// all servers involved in the distributed query processing.
    modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings.allow_experimental_analyzer));

    replica.connection->sendQuery(timeouts, query, /* query_parameters */ {}, query_id, stage, &modified_settings, &client_info, with_pending_data, {});
    replica.change_replica_timeout.setRelative(timeouts.receive_data_timeout);
    replica.packet_receiver->setTimeout(hedged_connections_factory.getConnectionTimeouts().receive_timeout);
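The `set()` call above looks like a no-op self-assignment, but per the comment its purpose is to mark the setting as explicitly changed, so that it is serialized with the query and the initiator's analyzer choice wins on every remote server. A compilable sketch of that assumed mechanism follows; ClickHouse's real `Settings` class is generated code and differs in detail.

```cpp
#include <iostream>
#include <string>

// Assumption: only settings flagged "changed" are sent to remote servers.
struct BoolSetting
{
    bool value = false;
    bool changed = false;  // serialized only when true
    explicit operator bool() const { return value; }
};

struct SettingsSketch
{
    BoolSetting allow_experimental_analyzer;

    void set(const std::string & name, bool v)
    {
        if (name == "allow_experimental_analyzer")
        {
            allow_experimental_analyzer.value = v;
            allow_experimental_analyzer.changed = true;  // force serialization
        }
    }
};

int main()
{
    SettingsSketch s;
    // Before the call: default value, not flagged, so a remote server would
    // fall back to its own default, which may differ from the initiator's.
    s.set("allow_experimental_analyzer", static_cast<bool>(s.allow_experimental_analyzer));
    std::cout << s.allow_experimental_analyzer.changed << '\n';  // prints 1
}
```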
@ -150,6 +150,12 @@ void MultiplexedConnections::sendQuery(
        }
    }

    /// FIXME: Remove this once `allow_experimental_analyzer` becomes an obsolete setting.
    /// Mark the analyzer setting as explicitly set, so it is effectively applied on the remote server.
    /// In other words, the initiator always controls whether the analyzer is enabled for
    /// all servers involved in the distributed query processing.
    modified_settings.set("allow_experimental_analyzer", static_cast<bool>(modified_settings.allow_experimental_analyzer));

    const bool enable_sample_offset_parallel_processing = settings.max_parallel_replicas > 1 && settings.allow_experimental_parallel_reading_from_replicas == 0;

    size_t num_replicas = replica_states.size();