Mirror of https://github.com/ClickHouse/ClickHouse.git (synced 2024-11-10 01:25:21 +00:00)

Commit 887a9ef184: Merge remote-tracking branch 'upstream/master' into HEAD
.clang-tidy

@@ -5,9 +5,7 @@
 # a) the new check is not controversial (this includes many checks in readability-* and google-*) or
 # b) too noisy (checks with > 100 new warnings are considered noisy, this includes e.g. cppcoreguidelines-*).
-# TODO Let clang-tidy check headers in further directories
-# --> HeaderFilterRegex: '^.*/(src|base|programs|utils)/.*(h|hpp)$'
-HeaderFilterRegex: '^.*/(base|programs|utils)/.*(h|hpp)$'
+HeaderFilterRegex: '^.*/(base|src|programs|utils)/.*(h|hpp)$'

 Checks: [
     '*',
@@ -22,6 +20,7 @@ Checks: [
     '-bugprone-branch-clone',
     '-bugprone-easily-swappable-parameters',
     '-bugprone-exception-escape',
     '-bugprone-forward-declaration-namespace',
     '-bugprone-implicit-widening-of-multiplication-result',
     '-bugprone-narrowing-conversions',
     '-bugprone-not-null-terminated-result',
@@ -37,6 +36,8 @@ Checks: [
     '-cert-oop54-cpp',
     '-cert-oop57-cpp',
     '-clang-analyzer-optin.performance.Padding',
     '-clang-analyzer-unix.Malloc',
     '-cppcoreguidelines-*', # impractical in a codebase as large as ClickHouse, also slow
.github/PULL_REQUEST_TEMPLATE.md (vendored, 29 lines changed)

@@ -40,3 +40,32 @@ At a minimum, the following information should be added (but add more as needed)
> Information about CI checks: https://clickhouse.com/docs/en/development/continuous-integration/

---
### Modify your CI run:
**NOTE:** If you merge the PR with modified CI, you **MUST KNOW** what you are doing
**NOTE:** Set the desired options before CI starts, or re-push after updates

#### Run only:
- [ ] <!---ci_set_integration--> Integration tests
- [ ] <!---ci_set_arm--> Integration tests (arm64)
- [ ] <!---ci_set_stateless--> Stateless tests (release)
- [ ] <!---ci_set_stateless_asan--> Stateless tests (asan)
- [ ] <!---ci_set_stateful--> Stateful tests (release)
- [ ] <!---ci_set_stateful_asan--> Stateful tests (asan)
- [ ] <!---ci_set_reduced--> No sanitizers
- [ ] <!---ci_set_analyzer--> Tests with analyzer
- [ ] <!---ci_set_fast--> Fast tests
- [ ] <!---job_package_debug--> Only package_debug build
- [ ] <!---PLACE_YOUR_TAG_CONFIGURED_IN_ci_config.py_FILE_HERE--> Add your CI variant description here

#### CI options:
- [ ] <!---do_not_test--> do not test (only style check)
- [ ] <!---no_merge_commit--> disable merge-commit (no merge from master before tests)
- [ ] <!---no_ci_cache--> disable CI cache (job reuse)

#### Only specified batches in multi-batch jobs:
- [ ] <!---batch_0--> 1
- [ ] <!---batch_1--> 2
- [ ] <!---batch_2--> 3
- [ ] <!---batch_3--> 4
@@ -26,4 +26,4 @@

 ## To run only specified batches for multi-batch job(s)
 #batch_2
-#btach_1_2_3
+#batch_1_2_3
CMakeLists.txt

@@ -61,11 +61,16 @@ if (ENABLE_CHECK_HEAVY_BUILDS)
     # set CPU time limit to 1000 seconds
     set (RLIMIT_CPU 1000)

-    # -fsanitize=memory and address are too heavy
+    # Sanitizers are too heavy
     if (SANITIZE OR SANITIZE_COVERAGE OR WITH_COVERAGE)
         set (RLIMIT_DATA 10000000000) # 10G
     endif()

+    # For some files currently building RISCV64 might be too slow. TODO: Improve compilation times per file
+    if (ARCH_RISCV64)
+        set (RLIMIT_CPU 1800)
+    endif()

     set (CMAKE_CXX_COMPILER_LAUNCHER prlimit --as=${RLIMIT_AS} --data=${RLIMIT_DATA} --cpu=${RLIMIT_CPU} ${CMAKE_CXX_COMPILER_LAUNCHER})
 endif ()

@@ -102,6 +107,8 @@ if (ENABLE_FUZZING)
     # For codegen_select_fuzzer
     set (ENABLE_PROTOBUF 1)

+    add_compile_definitions(FUZZING_MODE=1)
 endif()

 # Global libraries

@@ -574,7 +581,7 @@ if (FUZZER)
         if (NOT(target_type STREQUAL "INTERFACE_LIBRARY" OR target_type STREQUAL "UTILITY"))
             target_compile_options(${target} PRIVATE "-fsanitize=fuzzer-no-link")
         endif()
-        if (target_type STREQUAL "EXECUTABLE" AND (target MATCHES ".+_fuzzer" OR target STREQUAL "clickhouse"))
+        if (target_type STREQUAL "EXECUTABLE" AND target MATCHES ".+_fuzzer")
             message(STATUS "${target} instrumented with fuzzer")
             target_link_libraries(${target} PUBLIC ch_contrib::fuzzer)
             # Add to fuzzers bundle

@@ -583,6 +590,12 @@ if (FUZZER)
             get_target_property(target_bin_dir ${target} BINARY_DIR)
             add_custom_command(TARGET fuzzers POST_BUILD COMMAND mv "${target_bin_dir}/${target_bin_name}" "${CMAKE_CURRENT_BINARY_DIR}/programs/" VERBATIM)
         endif()
+        if (target STREQUAL "clickhouse")
+            message(STATUS "${target} instrumented with fuzzer")
+            target_link_libraries(${target} PUBLIC ch_contrib::fuzzer_no_main)
+            # Add to fuzzers bundle
+            add_dependencies(fuzzers ${target})
+        endif()
     endif()
     endforeach()
     add_custom_command(TARGET fuzzers POST_BUILD COMMAND SRC=${CMAKE_SOURCE_DIR} BIN=${CMAKE_BINARY_DIR} OUT=${CMAKE_BINARY_DIR}/programs ${CMAKE_SOURCE_DIR}/tests/fuzz/build.sh VERBATIM)
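For context on the fuzzer wiring above: every `*_fuzzer` executable is linked against `ch_contrib::fuzzer`, which supplies `main()`, while the `clickhouse` binary gets `fuzzer_no_main`. A minimal sketch of the entry point such a fuzzer target implements (illustrative only; this is the standard libFuzzer hook, not a file from this commit):

#include <cstddef>
#include <cstdint>

// libFuzzer calls this hook repeatedly with mutated inputs; crashes and
// sanitizer reports are what count as findings, the return value is ignored
// in practice (0 by convention).
extern "C" int LLVMFuzzerTestOneInput(const uint8_t * data, size_t size)
{
    if (size >= 2 && data[0] == 'C' && data[1] == 'H') // toy branch to guide coverage
        return 0;
    return 0;
}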
base/base/CMakeLists.txt

@@ -20,6 +20,7 @@ set (SRCS
     getPageSize.cpp
     getThreadId.cpp
     int8_to_string.cpp
+    itoa.cpp
     JSON.cpp
     mremap.cpp
     phdr_cache.cpp
@@ -1,8 +1,7 @@
 #pragma once

 #include <base/strong_typedef.h>
 #include <base/extended_types.h>
 #include <Common/formatIPv6.h>
 #include <base/strong_typedef.h>
 #include <Common/memcmpSmall.h>

 namespace DB

@@ -62,7 +61,8 @@ namespace std
 {
     size_t operator()(const DB::IPv6 & x) const
     {
-        return std::hash<std::string_view>{}(std::string_view(reinterpret_cast<const char*>(&x.toUnderType()), IPV6_BINARY_LENGTH));
+        return std::hash<std::string_view>{}(
+            std::string_view(reinterpret_cast<const char *>(&x.toUnderType()), sizeof(DB::IPv6::UnderlyingType)));
     }
 };
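The new hash body above feeds the raw 16 bytes of the IPv6 value through std::hash<std::string_view>. A standalone sketch of the same technique (the function name and signature here are ours, not from the commit):

#include <functional>
#include <string_view>

// Hash a fixed-size binary blob by viewing its bytes as a string_view;
// sizeof(bytes) plays the role of sizeof(DB::IPv6::UnderlyingType) above.
inline size_t hashBytes16(const unsigned char (&bytes)[16])
{
    return std::hash<std::string_view>{}(std::string_view(reinterpret_cast<const char *>(bytes), sizeof(bytes)));
}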
base/base/itoa.cpp (new file, 503 lines)

@@ -0,0 +1,503 @@
// Based on https://github.com/amdn/itoa and combined with our optimizations
//
//=== itoa.cpp - Fast integer to ascii conversion --*- C++ -*-//
//
// The MIT License (MIT)
// Copyright (c) 2016 Arturo Martin-de-Nicolas
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//===----------------------------------------------------------------------===//

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <type_traits>
#include <base/defines.h>
#include <base/extended_types.h>
#include <base/itoa.h>

namespace
{
template <typename T>
ALWAYS_INLINE inline constexpr T pow10(size_t x)
{
    return x ? 10 * pow10<T>(x - 1) : 1;
}

// Division by a power of 10 is implemented using a multiplicative inverse.
// This strength reduction is also done by optimizing compilers, but
// presently the fastest results are produced by using the values
// for the multiplication and the shift as given by the algorithm
// described by Agner Fog in "Optimizing Subroutines in Assembly Language"
//
// http://www.agner.org/optimize/optimizing_assembly.pdf
//
// "Integer division by a constant (all processors)
// A floating point number can be divided by a constant by multiplying
// with the reciprocal. If we want to do the same with integers, we have
// to scale the reciprocal by 2^n and then shift the product to the right
// by n. There are various algorithms for finding a suitable value of n
// and compensating for rounding errors. The algorithm described below
// was invented by Terje Mathisen, Norway, and not published elsewhere."

/// Division by constant is performed by:
/// 1. Adding 1 if needed;
/// 2. Multiplying by another constant;
/// 3. Shifting right by another constant.
template <typename UInt, bool add_, UInt multiplier_, unsigned shift_>
struct Division
{
    static constexpr bool add{add_};
    static constexpr UInt multiplier{multiplier_};
    static constexpr unsigned shift{shift_};
};

/// Select a type with appropriate number of bytes from the list of types.
/// First parameter is the number of bytes requested. Then goes a list of types with 1, 2, 4, ... number of bytes.
/// Example: SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t> will select uint32_t.
template <size_t N, typename T, typename... Ts>
struct SelectType
{
    using Result = typename SelectType<N / 2, Ts...>::Result;
};

template <typename T, typename... Ts>
struct SelectType<1, T, Ts...>
{
    using Result = T;
};
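A compile-time spot-check of the example given in the comment above (a sketch; this assertion is not part of the committed file):

static_assert(std::is_same_v<SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t>::Result, uint32_t>);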

/// Division by 10^N where N is the size of the type.
template <size_t N>
using DivisionBy10PowN = typename SelectType<
    N,
    Division<uint8_t, false, 205U, 11>, /// divide by 10
    Division<uint16_t, true, 41943U, 22>, /// divide by 100
    Division<uint32_t, false, 3518437209U, 45>, /// divide by 10000
    Division<uint64_t, false, 12379400392853802749ULL, 90> /// divide by 100000000
>::Result;
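The constants can be spot-checked at compile time. A minimal sketch (not in the committed file) for the one- and two-byte entries, with the multiplication done in the doubled-width type just as split() does below:

static_assert(((205U * 9U) >> 11) == 0 && ((205U * 255U) >> 11) == 25); /// divide by 10 on uint8_t values
static_assert(((41943U * (9999U + 1)) >> 22) == 99);                    /// divide by 100 on uint16_t values (add == true)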

template <size_t N>
using UnsignedOfSize = typename SelectType<N, uint8_t, uint16_t, uint32_t, uint64_t, __uint128_t>::Result;

/// Holds the result of dividing an unsigned N-byte variable by 10^N, i.e. a quotient and a remainder.
template <size_t N>
struct QuotientAndRemainder
{
    UnsignedOfSize<N> quotient; // quotient with fewer than 2*N decimal digits
    UnsignedOfSize<N / 2> remainder; // remainder with at most N decimal digits
};

template <size_t N>
QuotientAndRemainder<N> inline split(UnsignedOfSize<N> value)
{
    constexpr DivisionBy10PowN<N> division;

    UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
    UnsignedOfSize<N / 2> remainder = static_cast<UnsignedOfSize<N / 2>>(value - quotient * pow10<UnsignedOfSize<N / 2>>(N));

    return {quotient, remainder};
}

ALWAYS_INLINE inline char * outDigit(char * p, uint8_t value)
{
    *p = '0' + value;
    ++p;
    return p;
}

// Using a lookup table to convert binary numbers from 0 to 99
// into ascii characters as described by Andrei Alexandrescu in
// https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920/

const char digits[201] = "00010203040506070809"
                         "10111213141516171819"
                         "20212223242526272829"
                         "30313233343536373839"
                         "40414243444546474849"
                         "50515253545556575859"
                         "60616263646566676869"
                         "70717273747576777879"
                         "80818283848586878889"
                         "90919293949596979899";

ALWAYS_INLINE inline char * outTwoDigits(char * p, uint8_t value)
{
    memcpy(p, &digits[value * 2], 2);
    p += 2;
    return p;
}

namespace convert
{
template <typename UInt, size_t N = sizeof(UInt)>
char * head(char * p, UInt u);
template <typename UInt, size_t N = sizeof(UInt)>
char * tail(char * p, UInt u);

//===----------------------------------------------------------===//
//     head: find most significant digit, skip leading zeros
//===----------------------------------------------------------===//

// "x" contains quotient and remainder after division by 10^N
// quotient is less than 10^N
template <size_t N>
ALWAYS_INLINE inline char * head(char * p, QuotientAndRemainder<N> x)
{
    p = head(p, UnsignedOfSize<N / 2>(x.quotient));
    p = tail(p, x.remainder);
    return p;
}

// "u" is less than 10^(2*N)
template <typename UInt, size_t N>
ALWAYS_INLINE inline char * head(char * p, UInt u)
{
    return u < pow10<UnsignedOfSize<N>>(N) ? head(p, UnsignedOfSize<N / 2>(u)) : head<N>(p, split<N>(u));
}

// recursion base case, selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * head<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
    return u < 10 ? outDigit(p, u) : outTwoDigits(p, u);
}

//===----------------------------------------------------------===//
//     tail: produce all digits including leading zeros
//===----------------------------------------------------------===//

// recursive step, "u" is less than 10^(2*N)
template <typename UInt, size_t N>
ALWAYS_INLINE inline char * tail(char * p, UInt u)
{
    QuotientAndRemainder<N> x = split<N>(u);
    p = tail(p, UnsignedOfSize<N / 2>(x.quotient));
    p = tail(p, x.remainder);
    return p;
}

// recursion base case, selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * tail<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
    return outTwoDigits(p, u);
}

//===----------------------------------------------------------===//
// large values are >= 10^(2*N)
// where x contains quotient and remainder after division by 10^N
//===----------------------------------------------------------===//
template <size_t N>
ALWAYS_INLINE inline char * large(char * p, QuotientAndRemainder<N> x)
{
    QuotientAndRemainder<N> y = split<N>(x.quotient);
    p = head(p, UnsignedOfSize<N / 2>(y.quotient));
    p = tail(p, y.remainder);
    p = tail(p, x.remainder);
    return p;
}

//===----------------------------------------------------------===//
// handle values of "u" that might be >= 10^(2*N)
// where N is the size of "u" in bytes
//===----------------------------------------------------------===//
template <typename UInt, size_t N = sizeof(UInt)>
ALWAYS_INLINE inline char * uitoa(char * p, UInt u)
{
    if (u < pow10<UnsignedOfSize<N>>(N))
        return head(p, UnsignedOfSize<N / 2>(u));
    QuotientAndRemainder<N> x = split<N>(u);

    return u < pow10<UnsignedOfSize<N>>(2 * N) ? head<N>(p, x) : large<N>(p, x);
}

// selected when "u" is one byte
template <>
ALWAYS_INLINE inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
    if (u < 10)
        return outDigit(p, u);
    else if (u < 100)
        return outTwoDigits(p, u);
    else
    {
        p = outDigit(p, u / 100);
        p = outTwoDigits(p, u % 100);
        return p;
    }
}

//===----------------------------------------------------------===//
//     handle unsigned and signed integral operands
//===----------------------------------------------------------===//

// itoa: handle unsigned integral operands (selected by SFINAE)
template <typename U, std::enable_if_t<!std::is_signed_v<U> && std::is_integral_v<U>> * = nullptr>
ALWAYS_INLINE inline char * itoa(U u, char * p)
{
    return convert::uitoa(p, u);
}

// itoa: handle signed integral operands (selected by SFINAE)
template <typename I, size_t N = sizeof(I), std::enable_if_t<std::is_signed_v<I> && std::is_integral_v<I>> * = nullptr>
ALWAYS_INLINE inline char * itoa(I i, char * p)
{
    // Need "mask" to be filled with a copy of the sign bit.
    // If "i" is a negative value, then the result of "operator >>"
    // is implementation-defined, though usually it is an arithmetic
    // right shift that replicates the sign bit.
    // Use a conditional expression to be portable,
    // a good optimizing compiler generates an arithmetic right shift
    // and avoids the conditional branch.
    UnsignedOfSize<N> mask = i < 0 ? ~UnsignedOfSize<N>(0) : 0;
    // Now get the absolute value of "i" and cast to unsigned type UnsignedOfSize<N>.
    // Cannot use std::abs() because the result is undefined
    // in 2's complement systems for the most-negative value.
    // Want to avoid conditional branch for performance reasons since
    // CPU branch prediction will be ineffective when negative values
    // occur randomly.
    // Let "u" be "i" cast to unsigned type UnsignedOfSize<N>.
    // Subtract "u" from 2*u if "i" is positive or 0 if "i" is negative.
    // This yields the absolute value with the desired type without
    // using a conditional branch and without invoking undefined or
    // implementation defined behavior:
    UnsignedOfSize<N> u = ((2 * UnsignedOfSize<N>(i)) & ~mask) - UnsignedOfSize<N>(i);
    // Unconditionally store a minus sign when producing digits
    // in a forward direction and increment the pointer only if
    // the value is in fact negative.
    // This avoids a conditional branch and is safe because we will
    // always produce at least one digit and it will overwrite the
    // minus sign when the value is not negative.
    *p = '-';
    p += (mask & 1);
    p = convert::uitoa(p, u);
    return p;
}
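The branchless absolute-value trick above is easy to mis-read; here is a small constexpr restatement for 32-bit values (a sketch, not part of the committed file):

constexpr uint32_t branchlessAbs32(int32_t i)
{
    uint32_t mask = i < 0 ? ~uint32_t(0) : 0;          // all ones iff negative
    return ((2 * uint32_t(i)) & ~mask) - uint32_t(i);  // positive: 2u - u = u; negative: 0 - u = -u (mod 2^32)
}
static_assert(branchlessAbs32(7) == 7 && branchlessAbs32(-5) == 5);
static_assert(branchlessAbs32(INT32_MIN) == 0x80000000u); // well-defined even for the most-negative value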
}

const uint64_t max_multiple_of_hundred_that_fits_in_64_bits = 1'00'00'00'00'00'00'00'00'00ull;
const int max_multiple_of_hundred_blocks = 9;
static_assert(max_multiple_of_hundred_that_fits_in_64_bits % 100 == 0);
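Two more compile-time checks make the naming precise: the constant is 10^18, i.e. 100^9, and one more factor of 100 would overflow a uint64_t (a sketch, not in the committed file):

static_assert(max_multiple_of_hundred_that_fits_in_64_bits == 1000000000000000000ULL);                    // 100^9
static_assert(max_multiple_of_hundred_that_fits_in_64_bits > std::numeric_limits<uint64_t>::max() / 100); // 100^10 would not fit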

ALWAYS_INLINE inline char * writeUIntText(UInt128 _x, char * p)
{
    /// If the highest 64-bit item is empty, we can print just the lowest item as u64
    if (_x.items[UInt128::_impl::little(1)] == 0)
        return convert::itoa(_x.items[UInt128::_impl::little(0)], p);

    /// Doing operations using __int128 is faster and we already rely on this feature
    using T = unsigned __int128;
    T x = (T(_x.items[UInt128::_impl::little(1)]) << 64) + T(_x.items[UInt128::_impl::little(0)]);

    /// We are going to accumulate blocks of 2 digits to print until the number is small enough to be printed as u64
    /// To do this we could do: x / 100, x % 100
    /// But these would mean doing many iterations with long integers, so instead we divide by a much longer integer
    /// multiple of 100 (100^9) and then get the blocks out of it (as u64)
    /// Once we reach u64::max we can stop and use the fast method to print that in the front
    static const T large_divisor = max_multiple_of_hundred_that_fits_in_64_bits;
    static const T largest_uint64 = std::numeric_limits<uint64_t>::max();
    uint8_t two_values[20] = {0}; // 39 Max characters / 2

    int current_block = 0;
    while (x > largest_uint64)
    {
        uint64_t u64_remainder = uint64_t(x % large_divisor);
        x /= large_divisor;

        int pos = current_block;
        while (u64_remainder)
        {
            two_values[pos] = uint8_t(u64_remainder % 100);
            pos++;
            u64_remainder /= 100;
        }
        current_block += max_multiple_of_hundred_blocks;
    }

    char * highest_part_print = convert::itoa(uint64_t(x), p);
    for (int i = 0; i < current_block; i++)
    {
        outTwoDigits(highest_part_print, two_values[current_block - 1 - i]);
        highest_part_print += 2;
    }

    return highest_part_print;
}

ALWAYS_INLINE inline char * writeUIntText(UInt256 _x, char * p)
{
    /// If possible, treat it as a smaller integer as they are much faster to print
    if (_x.items[UInt256::_impl::little(3)] == 0 && _x.items[UInt256::_impl::little(2)] == 0)
        return writeUIntText(UInt128{_x.items[UInt256::_impl::little(0)], _x.items[UInt256::_impl::little(1)]}, p);

    /// If available (x86) we transform from our custom class to _BitInt(256) which has better support in the compiler
    /// and produces better code
    using T =
#if defined(__x86_64__)
#    pragma clang diagnostic push
#    pragma clang diagnostic ignored "-Wbit-int-extension"
        unsigned _BitInt(256)
#    pragma clang diagnostic pop
#else
        UInt256
#endif
        ;

#if defined(__x86_64__)
    T x = (T(_x.items[UInt256::_impl::little(3)]) << 192) + (T(_x.items[UInt256::_impl::little(2)]) << 128)
        + (T(_x.items[UInt256::_impl::little(1)]) << 64) + T(_x.items[UInt256::_impl::little(0)]);
#else
    T x = _x;
#endif

    /// Similar to writeUIntText(UInt128) only that in this case we will stop as soon as we reach the largest u128
    /// and switch to that function
    uint8_t two_values[39] = {0}; // 78 Max characters / 2
    int current_pos = 0;

    static const T large_divisor = max_multiple_of_hundred_that_fits_in_64_bits;
    static const T largest_uint128 = T(std::numeric_limits<uint64_t>::max()) << 64 | T(std::numeric_limits<uint64_t>::max());

    while (x > largest_uint128)
    {
        uint64_t u64_remainder = uint64_t(x % large_divisor);
        x /= large_divisor;

        int pos = current_pos;
        while (u64_remainder)
        {
            two_values[pos] = uint8_t(u64_remainder % 100);
            pos++;
            u64_remainder /= 100;
        }
        current_pos += max_multiple_of_hundred_blocks;
    }

#if defined(__x86_64__)
    UInt128 pending{uint64_t(x), uint64_t(x >> 64)};
#else
    UInt128 pending{x.items[UInt256::_impl::little(0)], x.items[UInt256::_impl::little(1)]};
#endif

    char * highest_part_print = writeUIntText(pending, p);
    for (int i = 0; i < current_pos; i++)
    {
        outTwoDigits(highest_part_print, two_values[current_pos - 1 - i]);
        highest_part_print += 2;
    }

    return highest_part_print;
}

ALWAYS_INLINE inline char * writeLeadingMinus(char * pos)
{
    *pos = '-';
    return pos + 1;
}

template <typename T>
ALWAYS_INLINE inline char * writeSIntText(T x, char * pos)
{
    static_assert(std::is_same_v<T, Int128> || std::is_same_v<T, Int256>);

    using UnsignedT = make_unsigned_t<T>;
    static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1);

    if (unlikely(x == min_int))
    {
        if constexpr (std::is_same_v<T, Int128>)
        {
            const char * res = "-170141183460469231731687303715884105728";
            memcpy(pos, res, strlen(res));
            return pos + strlen(res);
        }
        else if constexpr (std::is_same_v<T, Int256>)
        {
            const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968";
            memcpy(pos, res, strlen(res));
            return pos + strlen(res);
        }
    }

    if (x < 0)
    {
        x = -x;
        pos = writeLeadingMinus(pos);
    }
    return writeUIntText(UnsignedT(x), pos);
}
}

char * itoa(UInt8 i, char * p)
{
    return convert::itoa(uint8_t(i), p);
}

char * itoa(Int8 i, char * p)
{
    return convert::itoa(int8_t(i), p);
}

char * itoa(UInt128 i, char * p)
{
    return writeUIntText(i, p);
}

char * itoa(Int128 i, char * p)
{
    return writeSIntText(i, p);
}

char * itoa(UInt256 i, char * p)
{
    return writeUIntText(i, p);
}

char * itoa(Int256 i, char * p)
{
    return writeSIntText(i, p);
}

#define DEFAULT_ITOA(T) \
    char * itoa(T i, char * p) \
    { \
        return convert::itoa(i, p); \
    }

#define FOR_MISSING_INTEGER_TYPES(M) \
    M(uint8_t) \
    M(UInt16) \
    M(UInt32) \
    M(UInt64) \
    M(int8_t) \
    M(Int16) \
    M(Int32) \
    M(Int64)

FOR_MISSING_INTEGER_TYPES(DEFAULT_ITOA)

#if defined(OS_DARWIN)
DEFAULT_ITOA(unsigned long)
DEFAULT_ITOA(long)
#endif

#undef FOR_MISSING_INTEGER_TYPES
#undef DEFAULT_ITOA
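A usage sketch of the public interface defined above (illustrative; the buffer size and values are ours, not the commit's). itoa writes the decimal digits, plus a leading '-' for negatives, and returns a pointer one past the last byte written; it does not null-terminate:

#include <base/itoa.h>
#include <string>

std::string toDecimal(Int64 value)
{
    char buf[32];                  // enough for any 64-bit value plus sign
    char * end = itoa(value, buf); // e.g. value == -42 writes "-42" into buf
    return {buf, end};
}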

base/base/itoa.h (462 lines changed)

@@ -1,446 +1,30 @@
#pragma once

// Based on https://github.com/amdn/itoa and combined with our optimizations
//
//=== itoa.h - Fast integer to ascii conversion --*- C++ -*-//
//
// The MIT License (MIT)
// Copyright (c) 2016 Arturo Martin-de-Nicolas
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
//===----------------------------------------------------------------------===//

#include <cstdint>
#include <cstddef>
#include <cstring>
#include <type_traits>
#include <base/extended_types.h>

#define FOR_INTEGER_TYPES(M) \
    M(uint8_t) \
    M(UInt8) \
    M(UInt16) \
    M(UInt32) \
    M(UInt64) \
    M(UInt128) \
    M(UInt256) \
    M(int8_t) \
    M(Int8) \
    M(Int16) \
    M(Int32) \
    M(Int64) \
    M(Int128) \
    M(Int256)

#define INSTANTIATION(T) char * itoa(T i, char * p);
FOR_INTEGER_TYPES(INSTANTIATION)

#if defined(OS_DARWIN)
INSTANTIATION(unsigned long)
INSTANTIATION(long)
#endif

The rest of this hunk is the old header-only implementation, deleted by the commit (its declarations now live in itoa.cpp above):

template <typename T>
inline int digits10(T x)
{
    if (x < 10ULL)
        return 1;
    if (x < 100ULL)
        return 2;
    if (x < 1000ULL)
        return 3;

    if (x < 1000000000000ULL)
    {
        if (x < 100000000ULL)
        {
            if (x < 1000000ULL)
            {
                if (x < 10000ULL)
                    return 4;
                else
                    return 5 + (x >= 100000ULL);
            }

            return 7 + (x >= 10000000ULL);
        }

        if (x < 10000000000ULL)
            return 9 + (x >= 1000000000ULL);

        return 11 + (x >= 100000000000ULL);
    }

    return 12 + digits10(x / 1000000000000ULL);
}
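A quick runtime spot-check of the digit-count ladder above (a sketch; this function is not part of either version of the file):

#include <cassert>

void digits10SpotCheck()
{
    assert(digits10(9ULL) == 1 && digits10(10ULL) == 2);
    assert(digits10(999999999999ULL) == 12);  // largest value handled without recursion
    assert(digits10(1000000000000ULL) == 13); // recursive tail: 12 + digits10(x / 10^12)
}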

namespace impl
{

template <typename T>
static constexpr T pow10(size_t x)
{
    return x ? 10 * pow10<T>(x - 1) : 1;
}

// Division by a power of 10 is implemented using a multiplicative inverse.
// This strength reduction is also done by optimizing compilers, but
// presently the fastest results are produced by using the values
// for the multiplication and the shift as given by the algorithm
// described by Agner Fog in "Optimizing Subroutines in Assembly Language"
//
// http://www.agner.org/optimize/optimizing_assembly.pdf
//
// "Integer division by a constant (all processors)
// A floating point number can be divided by a constant by multiplying
// with the reciprocal. If we want to do the same with integers, we have
// to scale the reciprocal by 2^n and then shift the product to the right
// by n. There are various algorithms for finding a suitable value of n
// and compensating for rounding errors. The algorithm described below
// was invented by Terje Mathisen, Norway, and not published elsewhere."

/// Division by constant is performed by:
/// 1. Adding 1 if needed;
/// 2. Multiplying by another constant;
/// 3. Shifting right by another constant.
template <typename UInt, bool add_, UInt multiplier_, unsigned shift_>
struct Division
{
    static constexpr bool add{add_};
    static constexpr UInt multiplier{multiplier_};
    static constexpr unsigned shift{shift_};
};

/// Select a type with appropriate number of bytes from the list of types.
/// First parameter is the number of bytes requested. Then goes a list of types with 1, 2, 4, ... number of bytes.
/// Example: SelectType<4, uint8_t, uint16_t, uint32_t, uint64_t> will select uint32_t.
template <size_t N, typename T, typename... Ts>
struct SelectType
{
    using Result = typename SelectType<N / 2, Ts...>::Result;
};

template <typename T, typename... Ts>
struct SelectType<1, T, Ts...>
{
    using Result = T;
};


/// Division by 10^N where N is the size of the type.
template <size_t N>
using DivisionBy10PowN = typename SelectType
<
    N,
    Division<uint8_t, false, 205U, 11>, /// divide by 10
    Division<uint16_t, true, 41943U, 22>, /// divide by 100
    Division<uint32_t, false, 3518437209U, 45>, /// divide by 10000
    Division<uint64_t, false, 12379400392853802749ULL, 90> /// divide by 100000000
>::Result;

template <size_t N>
using UnsignedOfSize = typename SelectType
<
    N,
    uint8_t,
    uint16_t,
    uint32_t,
    uint64_t,
    __uint128_t
>::Result;

/// Holds the result of dividing an unsigned N-byte variable by 10^N, i.e. a quotient and a remainder.
template <size_t N>
struct QuotientAndRemainder
{
    UnsignedOfSize<N> quotient; // quotient with fewer than 2*N decimal digits
    UnsignedOfSize<N / 2> remainder; // remainder with at most N decimal digits
};

template <size_t N>
QuotientAndRemainder<N> static inline split(UnsignedOfSize<N> value)
{
    constexpr DivisionBy10PowN<N> division;

    UnsignedOfSize<N> quotient = (division.multiplier * (UnsignedOfSize<2 * N>(value) + division.add)) >> division.shift;
    UnsignedOfSize<N / 2> remainder = static_cast<UnsignedOfSize<N / 2>>(value - quotient * pow10<UnsignedOfSize<N / 2>>(N));

    return {quotient, remainder};
}


static inline char * outDigit(char * p, uint8_t value)
{
    *p = '0' + value;
    ++p;
    return p;
}

// Using a lookup table to convert binary numbers from 0 to 99
// into ascii characters as described by Andrei Alexandrescu in
// https://www.facebook.com/notes/facebook-engineering/three-optimization-tips-for-c/10151361643253920/

static const char digits[201] = "00010203040506070809"
                                "10111213141516171819"
                                "20212223242526272829"
                                "30313233343536373839"
                                "40414243444546474849"
                                "50515253545556575859"
                                "60616263646566676869"
                                "70717273747576777879"
                                "80818283848586878889"
                                "90919293949596979899";

static inline char * outTwoDigits(char * p, uint8_t value)
{
    memcpy(p, &digits[value * 2], 2);
    p += 2;
    return p;
}


namespace convert
{
template <typename UInt, size_t N = sizeof(UInt)> static char * head(char * p, UInt u);
template <typename UInt, size_t N = sizeof(UInt)> static char * tail(char * p, UInt u);

//===----------------------------------------------------------===//
//     head: find most significant digit, skip leading zeros
//===----------------------------------------------------------===//

// "x" contains quotient and remainder after division by 10^N
// quotient is less than 10^N
template <size_t N>
static inline char * head(char * p, QuotientAndRemainder<N> x)
{
    p = head(p, UnsignedOfSize<N / 2>(x.quotient));
    p = tail(p, x.remainder);
    return p;
}

// "u" is less than 10^(2*N)
template <typename UInt, size_t N>
static inline char * head(char * p, UInt u)
{
    return u < pow10<UnsignedOfSize<N>>(N)
        ? head(p, UnsignedOfSize<N / 2>(u))
        : head<N>(p, split<N>(u));
}

// recursion base case, selected when "u" is one byte
template <>
inline char * head<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
    return u < 10
        ? outDigit(p, u)
        : outTwoDigits(p, u);
}

//===----------------------------------------------------------===//
//     tail: produce all digits including leading zeros
//===----------------------------------------------------------===//

// recursive step, "u" is less than 10^(2*N)
template <typename UInt, size_t N>
static inline char * tail(char * p, UInt u)
{
    QuotientAndRemainder<N> x = split<N>(u);
    p = tail(p, UnsignedOfSize<N / 2>(x.quotient));
    p = tail(p, x.remainder);
    return p;
}

// recursion base case, selected when "u" is one byte
template <>
inline char * tail<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
    return outTwoDigits(p, u);
}

//===----------------------------------------------------------===//
// large values are >= 10^(2*N)
// where x contains quotient and remainder after division by 10^N
//===----------------------------------------------------------===//

template <size_t N>
static inline char * large(char * p, QuotientAndRemainder<N> x)
{
    QuotientAndRemainder<N> y = split<N>(x.quotient);
    p = head(p, UnsignedOfSize<N / 2>(y.quotient));
    p = tail(p, y.remainder);
    p = tail(p, x.remainder);
    return p;
}

//===----------------------------------------------------------===//
// handle values of "u" that might be >= 10^(2*N)
// where N is the size of "u" in bytes
//===----------------------------------------------------------===//

template <typename UInt, size_t N = sizeof(UInt)>
static inline char * uitoa(char * p, UInt u)
{
    if (u < pow10<UnsignedOfSize<N>>(N))
        return head(p, UnsignedOfSize<N / 2>(u));
    QuotientAndRemainder<N> x = split<N>(u);

    return u < pow10<UnsignedOfSize<N>>(2 * N)
        ? head<N>(p, x)
        : large<N>(p, x);
}

// selected when "u" is one byte
template <>
inline char * uitoa<UnsignedOfSize<1>, 1>(char * p, UnsignedOfSize<1> u)
{
    if (u < 10)
        return outDigit(p, u);
    else if (u < 100)
        return outTwoDigits(p, u);
    else
    {
        p = outDigit(p, u / 100);
        p = outTwoDigits(p, u % 100);
        return p;
    }
}

//===----------------------------------------------------------===//
//     handle unsigned and signed integral operands
//===----------------------------------------------------------===//

// itoa: handle unsigned integral operands (selected by SFINAE)
template <typename U, std::enable_if_t<!std::is_signed_v<U> && std::is_integral_v<U>> * = nullptr>
static inline char * itoa(U u, char * p)
{
    return convert::uitoa(p, u);
}

// itoa: handle signed integral operands (selected by SFINAE)
template <typename I, size_t N = sizeof(I), std::enable_if_t<std::is_signed_v<I> && std::is_integral_v<I>> * = nullptr>
static inline char * itoa(I i, char * p)
{
    // Need "mask" to be filled with a copy of the sign bit.
    // If "i" is a negative value, then the result of "operator >>"
    // is implementation-defined, though usually it is an arithmetic
    // right shift that replicates the sign bit.
    // Use a conditional expression to be portable,
    // a good optimizing compiler generates an arithmetic right shift
    // and avoids the conditional branch.
    UnsignedOfSize<N> mask = i < 0 ? ~UnsignedOfSize<N>(0) : 0;
    // Now get the absolute value of "i" and cast to unsigned type UnsignedOfSize<N>.
    // Cannot use std::abs() because the result is undefined
    // in 2's complement systems for the most-negative value.
    // Want to avoid conditional branch for performance reasons since
    // CPU branch prediction will be ineffective when negative values
    // occur randomly.
    // Let "u" be "i" cast to unsigned type UnsignedOfSize<N>.
    // Subtract "u" from 2*u if "i" is positive or 0 if "i" is negative.
    // This yields the absolute value with the desired type without
    // using a conditional branch and without invoking undefined or
    // implementation defined behavior:
    UnsignedOfSize<N> u = ((2 * UnsignedOfSize<N>(i)) & ~mask) - UnsignedOfSize<N>(i);
    // Unconditionally store a minus sign when producing digits
    // in a forward direction and increment the pointer only if
    // the value is in fact negative.
    // This avoids a conditional branch and is safe because we will
    // always produce at least one digit and it will overwrite the
    // minus sign when the value is not negative.
    *p = '-';
    p += (mask & 1);
    p = convert::uitoa(p, u);
    return p;
}
}


template <typename T>
static inline char * writeUIntText(T x, char * p)
{
    static_assert(is_unsigned_v<T>);

    int len = digits10(x);
    auto * pp = p + len;
    while (x >= 100)
    {
        const auto i = x % 100;
        x /= 100;
        pp -= 2;
        outTwoDigits(pp, i);
    }
    if (x < 10)
        *p = '0' + x;
    else
        outTwoDigits(p, x);
    return p + len;
}

static inline char * writeLeadingMinus(char * pos)
{
    *pos = '-';
    return pos + 1;
}

template <typename T>
static inline char * writeSIntText(T x, char * pos)
{
    static_assert(std::is_same_v<T, Int128> || std::is_same_v<T, Int256>);

    using UnsignedT = make_unsigned_t<T>;
    static constexpr T min_int = UnsignedT(1) << (sizeof(T) * 8 - 1);

    if (unlikely(x == min_int))
    {
        if constexpr (std::is_same_v<T, Int128>)
        {
            const char * res = "-170141183460469231731687303715884105728";
            memcpy(pos, res, strlen(res));
            return pos + strlen(res);
        }
        else if constexpr (std::is_same_v<T, Int256>)
        {
            const char * res = "-57896044618658097711785492504343953926634992332820282019728792003956564819968";
            memcpy(pos, res, strlen(res));
            return pos + strlen(res);
        }
    }

    if (x < 0)
    {
        x = -x;
        pos = writeLeadingMinus(pos);
    }
    return writeUIntText(UnsignedT(x), pos);
}

}

template <typename I>
char * itoa(I i, char * p)
{
    return impl::convert::itoa(i, p);
}

template <>
inline char * itoa(char8_t i, char * p)
{
    return impl::convert::itoa(uint8_t(i), p);
}

template <>
inline char * itoa(UInt128 i, char * p)
{
    return impl::writeUIntText(i, p);
}

template <>
inline char * itoa(Int128 i, char * p)
{
    return impl::writeSIntText(i, p);
}

template <>
inline char * itoa(UInt256 i, char * p)
{
    return impl::writeUIntText(i, p);
}

template <>
inline char * itoa(Int256 i, char * p)
{
    return impl::writeSIntText(i, p);
}

#undef FOR_INTEGER_TYPES
#undef INSTANTIATION
base/poco/Foundation/include/Poco/FPEnvironment_SUN.h (new file, 75 lines)

@@ -0,0 +1,75 @@
//
// FPEnvironment_SUN.h
//
// Library: Foundation
// Package: Core
// Module:  FPEnvironment
//
// Definitions of class FPEnvironmentImpl for Solaris.
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#ifndef Foundation_FPEnvironment_SUN_INCLUDED
#define Foundation_FPEnvironment_SUN_INCLUDED


#include <ieeefp.h>
#include "Poco/Foundation.h"


namespace Poco
{


class FPEnvironmentImpl
{
protected:
    enum RoundingModeImpl
    {
        FP_ROUND_DOWNWARD_IMPL = FP_RM,
        FP_ROUND_UPWARD_IMPL = FP_RP,
        FP_ROUND_TONEAREST_IMPL = FP_RN,
        FP_ROUND_TOWARDZERO_IMPL = FP_RZ
    };
    enum FlagImpl
    {
        FP_DIVIDE_BY_ZERO_IMPL = FP_X_DZ,
        FP_INEXACT_IMPL = FP_X_IMP,
        FP_OVERFLOW_IMPL = FP_X_OFL,
        FP_UNDERFLOW_IMPL = FP_X_UFL,
        FP_INVALID_IMPL = FP_X_INV
    };
    FPEnvironmentImpl();
    FPEnvironmentImpl(const FPEnvironmentImpl & env);
    ~FPEnvironmentImpl();
    FPEnvironmentImpl & operator=(const FPEnvironmentImpl & env);
    void keepCurrentImpl();
    static void clearFlagsImpl();
    static bool isFlagImpl(FlagImpl flag);
    static void setRoundingModeImpl(RoundingModeImpl mode);
    static RoundingModeImpl getRoundingModeImpl();
    static bool isInfiniteImpl(float value);
    static bool isInfiniteImpl(double value);
    static bool isInfiniteImpl(long double value);
    static bool isNaNImpl(float value);
    static bool isNaNImpl(double value);
    static bool isNaNImpl(long double value);
    static float copySignImpl(float target, float source);
    static double copySignImpl(double target, double source);
    static long double copySignImpl(long double target, long double source);

private:
    fp_rnd _rnd;
    fp_except _exc;
};


} // namespace Poco


#endif // Foundation_FPEnvironment_SUN_INCLUDED
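A usage sketch of how this Impl class is normally reached (assuming Poco's public Poco::FPEnvironment facade, which fronts the platform Impl and is not itself part of this diff): constructing the object snapshots the floating-point state, and the destructor restores it, so rounding-mode changes stay scoped.

#include "Poco/FPEnvironment.h"

void roundTowardZeroScoped()
{
    Poco::FPEnvironment env; // saves the current rounding mode and exception mask
    Poco::FPEnvironment::setRoundingMode(Poco::FPEnvironment::FP_ROUND_TOWARDZERO);
    // ... floating-point work under truncation ...
}                            // env's destructor restores the saved state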

@@ -281,15 +281,15 @@ void EnvironmentImpl::nodeIdImpl(NodeId& id)
 /// #include <sys/ioctl.h>
 #if defined(sun) || defined(__sun)
 #include <sys/sockio.h>
 #include <netdb.h>
 #include <net/if.h>
 #include <net/if_arp.h>
 #endif
 /// #include <sys/socket.h>
 /// #include <sys/types.h>
 /// #include <netinet/in.h>
 /// #include <net/if.h>
 /// #include <arpa/inet.h>
 /// #include <netdb.h>
 /// #include <net/if.h>
 /// #include <net/if_arp.h>
 /// #include <unistd.h>
base/poco/Foundation/src/FPEnvironment_SUN.cpp (new file, 139 lines)

@@ -0,0 +1,139 @@
//
// FPEnvironment_SUN.cpp
//
// Library: Foundation
// Package: Core
// Module:  FPEnvironment
//
// Copyright (c) 2005-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// SPDX-License-Identifier: BSL-1.0
//


#include <math.h>
#include "Poco/FPEnvironment_SUN.h"


namespace Poco {


FPEnvironmentImpl::FPEnvironmentImpl()
{
    _rnd = fpgetround();
    _exc = fpgetmask();
}


FPEnvironmentImpl::FPEnvironmentImpl(const FPEnvironmentImpl& env)
{
    _rnd = env._rnd;
    _exc = env._exc;
}


FPEnvironmentImpl::~FPEnvironmentImpl()
{
    fpsetround(_rnd);
    fpsetmask(_exc);
}


FPEnvironmentImpl& FPEnvironmentImpl::operator = (const FPEnvironmentImpl& env)
{
    _rnd = env._rnd;
    _exc = env._exc;
    return *this;
}


bool FPEnvironmentImpl::isInfiniteImpl(float value)
{
    int cls = fpclass(value);
    return cls == FP_PINF || cls == FP_NINF;
}


bool FPEnvironmentImpl::isInfiniteImpl(double value)
{
    int cls = fpclass(value);
    return cls == FP_PINF || cls == FP_NINF;
}


bool FPEnvironmentImpl::isInfiniteImpl(long double value)
{
    int cls = fpclass(value);
    return cls == FP_PINF || cls == FP_NINF;
}


bool FPEnvironmentImpl::isNaNImpl(float value)
{
    return isnanf(value) != 0;
}


bool FPEnvironmentImpl::isNaNImpl(double value)
{
    return isnan(value) != 0;
}


bool FPEnvironmentImpl::isNaNImpl(long double value)
{
    return isnan((double) value) != 0;
}


float FPEnvironmentImpl::copySignImpl(float target, float source)
{
    return (float) copysign(target, source);
}


double FPEnvironmentImpl::copySignImpl(double target, double source)
{
    return copysign(target, source); // note: the committed file casts this result to float, a copy-paste slip that would silently narrow
}


long double FPEnvironmentImpl::copySignImpl(long double target, long double source)
{
    return (source > 0 && target > 0) || (source < 0 && target < 0) ? target : -target;
}


void FPEnvironmentImpl::keepCurrentImpl()
{
    fpsetround(_rnd);
    fpsetmask(_exc);
}


void FPEnvironmentImpl::clearFlagsImpl()
{
    fpsetsticky(0);
}


bool FPEnvironmentImpl::isFlagImpl(FlagImpl flag)
{
    return (fpgetsticky() & flag) != 0;
}


void FPEnvironmentImpl::setRoundingModeImpl(RoundingModeImpl mode)
{
    fpsetround((fp_rnd) mode);
}


FPEnvironmentImpl::RoundingModeImpl FPEnvironmentImpl::getRoundingModeImpl()
{
    return (FPEnvironmentImpl::RoundingModeImpl) fpgetround();
}


} // namespace Poco
@@ -31,7 +31,7 @@
 namespace Poco {

-#if (POCO_OS == POCO_OS_LINUX) || (POCO_OS == POCO_OS_ANDROID) || (POCO_OS == POCO_OS_CYGWIN) || (POCO_OS == POCO_OS_FREE_BSD)
+#if (POCO_OS == POCO_OS_LINUX) || (POCO_OS == POCO_OS_ANDROID) || (POCO_OS == POCO_OS_CYGWIN) || (POCO_OS == POCO_OS_FREE_BSD) || (POCO_OS == POCO_OS_SOLARIS)
 union semun
 {
     int val;
@@ -31,7 +31,7 @@
 namespace Poco {

-#if (POCO_OS == POCO_OS_LINUX) || (POCO_OS == POCO_OS_ANDROID) || (POCO_OS == POCO_OS_CYGWIN) || (POCO_OS == POCO_OS_FREE_BSD)
+#if (POCO_OS == POCO_OS_LINUX) || (POCO_OS == POCO_OS_ANDROID) || (POCO_OS == POCO_OS_CYGWIN) || (POCO_OS == POCO_OS_FREE_BSD) || (POCO_OS == POCO_OS_SOLARIS)
 union semun
 {
     int val;
@@ -9,6 +9,10 @@ elseif (OS_DARWIN OR OS_FREEBSD)
     target_compile_definitions (_poco_net PUBLIC POCO_HAVE_FD_POLL)
 endif ()

+if (OS_SUNOS)
+    target_link_libraries (_poco_net PUBLIC socket nsl)
+endif ()

 # TODO: remove these warning exclusions
 target_compile_options (_poco_net
     PRIVATE
@@ -30,7 +30,6 @@ namespace Net

 class HTTPServerRequest;
 class HTTPServerResponse;
 class HTTPRequestHandler;
@@ -86,6 +86,8 @@ elseif (OS_DARWIN)
     target_compile_definitions(_c-ares PRIVATE -D_DARWIN_C_SOURCE)
 elseif (OS_FREEBSD)
     target_include_directories(_c-ares SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/c-ares-cmake/freebsd")
+elseif (OS_SUNOS)
+    target_include_directories(_c-ares SYSTEM PUBLIC "${ClickHouse_SOURCE_DIR}/contrib/c-ares-cmake/solaris")
 endif()

 add_library(ch_contrib::c-ares ALIAS _c-ares)
contrib/c-ares-cmake/solaris/ares_build.h (new file, 104 lines)

@@ -0,0 +1,104 @@
/* include/ares_build.h. Generated from ares_build.h.in by configure. */
#ifndef __CARES_BUILD_H
#define __CARES_BUILD_H


/* Copyright (C) 2009 - 2021 by Daniel Stenberg et al
 *
 * Permission to use, copy, modify, and distribute this software and its
 * documentation for any purpose and without fee is hereby granted, provided
 * that the above copyright notice appear in all copies and that both that
 * copyright notice and this permission notice appear in supporting
 * documentation, and that the name of M.I.T. not be used in advertising or
 * publicity pertaining to distribution of the software without specific,
 * written prior permission. M.I.T. makes no representations about the
 * suitability of this software for any purpose. It is provided "as is"
 * without express or implied warranty.
 */

/* ================================================================ */
/*               NOTES FOR CONFIGURE CAPABLE SYSTEMS                */
/* ================================================================ */

/*
 * NOTE 1:
 * -------
 *
 * Nothing in this file is intended to be modified or adjusted by the
 * c-ares library user nor by the c-ares library builder.
 *
 * If you think that something actually needs to be changed, adjusted
 * or fixed in this file, then, report it on the c-ares development
 * mailing list: http://lists.haxx.se/listinfo/c-ares/
 *
 * This header file shall only export symbols which are 'cares' or 'CARES'
 * prefixed, otherwise public name space would be polluted.
 *
 * NOTE 2:
 * -------
 *
 * Right now you might be staring at file ares_build.h.in or ares_build.h,
 * this is due to the following reason:
 *
 * On systems capable of running the configure script, the configure process
 * will overwrite the distributed ares_build.h file with one that is suitable
 * and specific to the library being configured and built, which is generated
 * from the ares_build.h.in template file.
 *
 */

/* ================================================================ */
/*  DEFINITION OF THESE SYMBOLS SHALL NOT TAKE PLACE ANYWHERE ELSE  */
/* ================================================================ */

#ifdef CARES_TYPEOF_ARES_SOCKLEN_T
#  error "CARES_TYPEOF_ARES_SOCKLEN_T shall not be defined except in ares_build.h"
   Error Compilation_aborted_CARES_TYPEOF_ARES_SOCKLEN_T_already_defined
#endif

#define CARES_HAVE_ARPA_NAMESER_H 1
#define CARES_HAVE_ARPA_NAMESER_COMPAT_H 1

/* ================================================================ */
/*  EXTERNAL INTERFACE SETTINGS FOR CONFIGURE CAPABLE SYSTEMS ONLY  */
/* ================================================================ */

/* Configure process defines this to 1 when it finds out that system  */
/* header file ws2tcpip.h must be included by the external interface. */
/* #undef CARES_PULL_WS2TCPIP_H */
#ifdef CARES_PULL_WS2TCPIP_H
#  ifndef WIN32_LEAN_AND_MEAN
#    define WIN32_LEAN_AND_MEAN
#  endif
#  include <windows.h>
#  include <winsock2.h>
#  include <ws2tcpip.h>
#endif

/* Configure process defines this to 1 when it finds out that system   */
/* header file sys/types.h must be included by the external interface. */
#define CARES_PULL_SYS_TYPES_H 1
#ifdef CARES_PULL_SYS_TYPES_H
#  include <sys/types.h>
#endif

/* Configure process defines this to 1 when it finds out that system    */
/* header file sys/socket.h must be included by the external interface. */
#define CARES_PULL_SYS_SOCKET_H 1
#ifdef CARES_PULL_SYS_SOCKET_H
#  include <sys/socket.h>
#endif

/* Integral data type used for ares_socklen_t. */
#define CARES_TYPEOF_ARES_SOCKLEN_T socklen_t

/* Data type definition of ares_socklen_t. */
typedef CARES_TYPEOF_ARES_SOCKLEN_T ares_socklen_t;

/* Integral data type used for ares_ssize_t. */
#define CARES_TYPEOF_ARES_SSIZE_T ssize_t

/* Data type definition of ares_ssize_t. */
typedef CARES_TYPEOF_ARES_SSIZE_T ares_ssize_t;

#endif /* __CARES_BUILD_H */
contrib/c-ares-cmake/solaris/ares_config.h (new file, 503 lines)

@@ -0,0 +1,503 @@
/* src/lib/ares_config.h. Generated from ares_config.h.in by configure. */
/* src/lib/ares_config.h.in. Generated from configure.ac by autoheader. */

/* Define if building universal (internal helper macro) */
/* #undef AC_APPLE_UNIVERSAL_BUILD */

/* define this if ares is built for a big endian system */
/* #undef ARES_BIG_ENDIAN */

/* Defined for build that exposes internal static functions for testing. */
/* #undef CARES_EXPOSE_STATICS */

/* a suitable file/device to read random data from */
#define CARES_RANDOM_FILE "/dev/urandom"

/* Defined for build with symbol hiding. */
#define CARES_SYMBOL_HIDING 1

/* Definition to make a library symbol externally visible. */
#define CARES_SYMBOL_SCOPE_EXTERN __attribute__ ((__visibility__ ("default")))

/* the signed version of size_t */
#define CARES_TYPEOF_ARES_SSIZE_T ssize_t

/* Use resolver library to configure cares */
/* #undef CARES_USE_LIBRESOLV */

/* if a /etc/inet dir is being used */
#define ETC_INET 1

/* Define to the type of arg 2 for gethostname. */
#define GETHOSTNAME_TYPE_ARG2 int

/* Define to the type qualifier of arg 1 for getnameinfo. */
#define GETNAMEINFO_QUAL_ARG1 const

/* Define to the type of arg 1 for getnameinfo. */
#define GETNAMEINFO_TYPE_ARG1 struct sockaddr *

/* Define to the type of arg 2 for getnameinfo. */
#define GETNAMEINFO_TYPE_ARG2 socklen_t

/* Define to the type of args 4 and 6 for getnameinfo. */
#define GETNAMEINFO_TYPE_ARG46 socklen_t

/* Define to the type of arg 7 for getnameinfo. */
#define GETNAMEINFO_TYPE_ARG7 int

/* Specifies the number of arguments to getservbyport_r */
#define GETSERVBYPORT_R_ARGS 5

/* Specifies the size of the buffer to pass to getservbyport_r */
#define GETSERVBYPORT_R_BUFSIZE 4096

/* Define to 1 if you have AF_INET6. */
#define HAVE_AF_INET6 1

/* Define to 1 if you have the <arpa/inet.h> header file. */
#define HAVE_ARPA_INET_H 1

/* Define to 1 if you have the <arpa/nameser_compat.h> header file. */
#define HAVE_ARPA_NAMESER_COMPAT_H 1

/* Define to 1 if you have the <arpa/nameser.h> header file. */
#define HAVE_ARPA_NAMESER_H 1

/* Define to 1 if you have the <assert.h> header file. */
#define HAVE_ASSERT_H 1

/* Define to 1 if you have the `bitncmp' function. */
/* #undef HAVE_BITNCMP */

/* Define to 1 if bool is an available type. */
#define HAVE_BOOL_T 1

/* Define to 1 if you have the clock_gettime function and monotonic timer. */
#define HAVE_CLOCK_GETTIME_MONOTONIC 1

/* Define to 1 if you have the closesocket function. */
/* #undef HAVE_CLOSESOCKET */

/* Define to 1 if you have the CloseSocket camel case function. */
/* #undef HAVE_CLOSESOCKET_CAMEL */

/* Define to 1 if you have the connect function. */
#define HAVE_CONNECT 1

/* define if the compiler supports basic C++11 syntax */
#define HAVE_CXX11 1

/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1

/* Define to 1 if you have the <errno.h> header file. */
#define HAVE_ERRNO_H 1

/* Define to 1 if you have the fcntl function. */
#define HAVE_FCNTL 1

/* Define to 1 if you have the <fcntl.h> header file. */
#define HAVE_FCNTL_H 1

/* Define to 1 if you have a working fcntl O_NONBLOCK function. */
#define HAVE_FCNTL_O_NONBLOCK 1

/* Define to 1 if you have the freeaddrinfo function. */
#define HAVE_FREEADDRINFO 1

/* Define to 1 if you have a working getaddrinfo function. */
#define HAVE_GETADDRINFO 1

/* Define to 1 if the getaddrinfo function is threadsafe. */
#define HAVE_GETADDRINFO_THREADSAFE 1

/* Define to 1 if you have the getenv function. */
#define HAVE_GETENV 1

/* Define to 1 if you have the gethostbyaddr function. */
#define HAVE_GETHOSTBYADDR 1

/* Define to 1 if you have the gethostbyname function. */
#define HAVE_GETHOSTBYNAME 1

/* Define to 1 if you have the gethostname function. */
#define HAVE_GETHOSTNAME 1

/* Define to 1 if you have the getnameinfo function. */
#define HAVE_GETNAMEINFO 1

/* Define to 1 if you have the getservbyport_r function. */
#define HAVE_GETSERVBYPORT_R 1

/* Define to 1 if you have the `gettimeofday' function. */
#define HAVE_GETTIMEOFDAY 1

/* Define to 1 if you have the `if_indextoname' function. */
#define HAVE_IF_INDEXTONAME 1

/* Define to 1 if you have a IPv6 capable working inet_net_pton function. */
|
||||
/* #undef HAVE_INET_NET_PTON */
|
||||
|
||||
/* Define to 1 if you have a IPv6 capable working inet_ntop function. */
|
||||
#define HAVE_INET_NTOP 1
|
||||
|
||||
/* Define to 1 if you have a IPv6 capable working inet_pton function. */
|
||||
#define HAVE_INET_PTON 1
|
||||
|
||||
/* Define to 1 if you have the <inttypes.h> header file. */
|
||||
#define HAVE_INTTYPES_H 1
|
||||
|
||||
/* Define to 1 if you have the ioctl function. */
|
||||
#define HAVE_IOCTL 1
|
||||
|
||||
/* Define to 1 if you have the ioctlsocket function. */
|
||||
/* #undef HAVE_IOCTLSOCKET */
|
||||
|
||||
/* Define to 1 if you have the IoctlSocket camel case function. */
|
||||
/* #undef HAVE_IOCTLSOCKET_CAMEL */
|
||||
|
||||
/* Define to 1 if you have a working IoctlSocket camel case FIONBIO function.
|
||||
*/
|
||||
/* #undef HAVE_IOCTLSOCKET_CAMEL_FIONBIO */
|
||||
|
||||
/* Define to 1 if you have a working ioctlsocket FIONBIO function. */
|
||||
/* #undef HAVE_IOCTLSOCKET_FIONBIO */
|
||||
|
||||
/* Define to 1 if you have a working ioctl FIONBIO function. */
|
||||
/* #undef HAVE_IOCTL_FIONBIO */
|
||||
|
||||
/* Define to 1 if you have a working ioctl SIOCGIFADDR function. */
|
||||
/* #undef HAVE_IOCTL_SIOCGIFADDR */
|
||||
|
||||
/* Define to 1 if you have the `resolve' library (-lresolve). */
|
||||
/* #undef HAVE_LIBRESOLVE */
|
||||
|
||||
/* Define to 1 if you have the <limits.h> header file. */
|
||||
#define HAVE_LIMITS_H 1
|
||||
|
||||
/* if your compiler supports LL */
|
||||
#define HAVE_LL 1
|
||||
|
||||
/* Define to 1 if the compiler supports the 'long long' data type. */
|
||||
#define HAVE_LONGLONG 1
|
||||
|
||||
/* Define to 1 if you have the malloc.h header file. */
|
||||
#define HAVE_MALLOC_H 1
|
||||
|
||||
/* Define to 1 if you have the memory.h header file. */
|
||||
#define HAVE_MEMORY_H 1
|
||||
|
||||
/* Define to 1 if you have the MSG_NOSIGNAL flag. */
|
||||
#define HAVE_MSG_NOSIGNAL 1
|
||||
|
||||
/* Define to 1 if you have the <netdb.h> header file. */
|
||||
#define HAVE_NETDB_H 1
|
||||
|
||||
/* Define to 1 if you have the <netinet/in.h> header file. */
|
||||
#define HAVE_NETINET_IN_H 1
|
||||
|
||||
/* Define to 1 if you have the <netinet/tcp.h> header file. */
|
||||
#define HAVE_NETINET_TCP_H 1
|
||||
|
||||
/* Define to 1 if you have the <net/if.h> header file. */
|
||||
#define HAVE_NET_IF_H 1
|
||||
|
||||
/* Define to 1 if you have PF_INET6. */
|
||||
#define HAVE_PF_INET6 1
|
||||
|
||||
/* Define to 1 if you have the recv function. */
|
||||
#define HAVE_RECV 1
|
||||
|
||||
/* Define to 1 if you have the recvfrom function. */
|
||||
#define HAVE_RECVFROM 1
|
||||
|
||||
/* Define to 1 if you have the send function. */
|
||||
#define HAVE_SEND 1
|
||||
|
||||
/* Define to 1 if you have the setsockopt function. */
|
||||
#define HAVE_SETSOCKOPT 1
|
||||
|
||||
/* Define to 1 if you have a working setsockopt SO_NONBLOCK function. */
|
||||
/* #undef HAVE_SETSOCKOPT_SO_NONBLOCK */
|
||||
|
||||
/* Define to 1 if you have the <signal.h> header file. */
|
||||
#define HAVE_SIGNAL_H 1
|
||||
|
||||
/* Define to 1 if sig_atomic_t is an available typedef. */
|
||||
#define HAVE_SIG_ATOMIC_T 1
|
||||
|
||||
/* Define to 1 if sig_atomic_t is already defined as volatile. */
|
||||
/* #undef HAVE_SIG_ATOMIC_T_VOLATILE */
|
||||
|
||||
/* Define to 1 if your struct sockaddr_in6 has sin6_scope_id. */
|
||||
#define HAVE_SOCKADDR_IN6_SIN6_SCOPE_ID 1
|
||||
|
||||
/* Define to 1 if you have the socket function. */
|
||||
#define HAVE_SOCKET 1
|
||||
|
||||
/* Define to 1 if you have the <socket.h> header file. */
|
||||
/* #undef HAVE_SOCKET_H */
|
||||
|
||||
/* Define to 1 if you have the <stdbool.h> header file. */
|
||||
#define HAVE_STDBOOL_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdint.h> header file. */
|
||||
#define HAVE_STDINT_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdio.h> header file. */
|
||||
#define HAVE_STDIO_H 1
|
||||
|
||||
/* Define to 1 if you have the <stdlib.h> header file. */
|
||||
#define HAVE_STDLIB_H 1
|
||||
|
||||
/* Define to 1 if you have the strcasecmp function. */
|
||||
#define HAVE_STRCASECMP 1
|
||||
|
||||
/* Define to 1 if you have the strcmpi function. */
|
||||
/* #undef HAVE_STRCMPI */
|
||||
|
||||
/* Define to 1 if you have the strdup function. */
|
||||
#define HAVE_STRDUP 1
|
||||
|
||||
/* Define to 1 if you have the stricmp function. */
|
||||
/* #undef HAVE_STRICMP */
|
||||
|
||||
/* Define to 1 if you have the <strings.h> header file. */
|
||||
#define HAVE_STRINGS_H 1
|
||||
|
||||
/* Define to 1 if you have the <string.h> header file. */
|
||||
#define HAVE_STRING_H 1
|
||||
|
||||
/* Define to 1 if you have the strncasecmp function. */
|
||||
#define HAVE_STRNCASECMP 1
|
||||
|
||||
/* Define to 1 if you have the strncmpi function. */
|
||||
/* #undef HAVE_STRNCMPI */
|
||||
|
||||
/* Define to 1 if you have the strnicmp function. */
|
||||
/* #undef HAVE_STRNICMP */
|
||||
|
||||
/* Define to 1 if you have the <stropts.h> header file. */
|
||||
#define HAVE_STROPTS_H 1
|
||||
|
||||
/* Define to 1 if you have struct addrinfo. */
|
||||
#define HAVE_STRUCT_ADDRINFO 1
|
||||
|
||||
/* Define to 1 if you have struct in6_addr. */
|
||||
#define HAVE_STRUCT_IN6_ADDR 1
|
||||
|
||||
/* Define to 1 if you have struct sockaddr_in6. */
|
||||
#define HAVE_STRUCT_SOCKADDR_IN6 1
|
||||
|
||||
/* if struct sockaddr_storage is defined */
|
||||
#define HAVE_STRUCT_SOCKADDR_STORAGE 1
|
||||
|
||||
/* Define to 1 if you have the timeval struct. */
|
||||
#define HAVE_STRUCT_TIMEVAL 1
|
||||
|
||||
/* Define to 1 if you have the <sys/ioctl.h> header file. */
|
||||
#define HAVE_SYS_IOCTL_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/param.h> header file. */
|
||||
#define HAVE_SYS_PARAM_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/select.h> header file. */
|
||||
#define HAVE_SYS_SELECT_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/socket.h> header file. */
|
||||
#define HAVE_SYS_SOCKET_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/stat.h> header file. */
|
||||
#define HAVE_SYS_STAT_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/time.h> header file. */
|
||||
#define HAVE_SYS_TIME_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/types.h> header file. */
|
||||
#define HAVE_SYS_TYPES_H 1
|
||||
|
||||
/* Define to 1 if you have the <sys/uio.h> header file. */
|
||||
#define HAVE_SYS_UIO_H 1
|
||||
|
||||
/* Define to 1 if you have the <time.h> header file. */
|
||||
#define HAVE_TIME_H 1
|
||||
|
||||
/* Define to 1 if you have the <unistd.h> header file. */
|
||||
#define HAVE_UNISTD_H 1
|
||||
|
||||
/* Define to 1 if you have the windows.h header file. */
|
||||
/* #undef HAVE_WINDOWS_H */
|
||||
|
||||
/* Define to 1 if you have the winsock2.h header file. */
|
||||
/* #undef HAVE_WINSOCK2_H */
|
||||
|
||||
/* Define to 1 if you have the winsock.h header file. */
|
||||
/* #undef HAVE_WINSOCK_H */
|
||||
|
||||
/* Define to 1 if you have the writev function. */
|
||||
#define HAVE_WRITEV 1
|
||||
|
||||
/* Define to 1 if you have the ws2tcpip.h header file. */
|
||||
/* #undef HAVE_WS2TCPIP_H */
|
||||
|
||||
/* Define if __system_property_get exists. */
|
||||
/* #undef HAVE___SYSTEM_PROPERTY_GET */
|
||||
|
||||
/* Define to the sub-directory where libtool stores uninstalled libraries. */
|
||||
#define LT_OBJDIR ".libs/"
|
||||
|
||||
/* Define to 1 if you need the malloc.h header file even with stdlib.h */
|
||||
/* #undef NEED_MALLOC_H */
|
||||
|
||||
/* Define to 1 if you need the memory.h header file even with stdlib.h */
|
||||
/* #undef NEED_MEMORY_H */
|
||||
|
||||
/* Define to 1 if _REENTRANT preprocessor symbol must be defined. */
|
||||
#define NEED_REENTRANT 1
|
||||
|
||||
/* Define to 1 if _THREAD_SAFE preprocessor symbol must be defined. */
|
||||
/* #undef NEED_THREAD_SAFE */
|
||||
|
||||
/* cpu-machine-OS */
|
||||
#define OS "x86_64-pc-solaris2.11"
|
||||
|
||||
/* Name of package */
|
||||
#define PACKAGE "c-ares"
|
||||
|
||||
/* Define to the address where bug reports for this package should be sent. */
|
||||
#define PACKAGE_BUGREPORT "c-ares mailing list: http://lists.haxx.se/listinfo/c-ares"
|
||||
|
||||
/* Define to the full name of this package. */
|
||||
#define PACKAGE_NAME "c-ares"
|
||||
|
||||
/* Define to the full name and version of this package. */
|
||||
#define PACKAGE_STRING "c-ares 1.18.1"
|
||||
|
||||
/* Define to the one symbol short name of this package. */
|
||||
#define PACKAGE_TARNAME "c-ares"
|
||||
|
||||
/* Define to the home page for this package. */
|
||||
#define PACKAGE_URL ""
|
||||
|
||||
/* Define to the version of this package. */
|
||||
#define PACKAGE_VERSION "1.18.1"
|
||||
|
||||
/* Define to the type qualifier pointed by arg 5 for recvfrom. */
|
||||
#define RECVFROM_QUAL_ARG5
|
||||
|
||||
/* Define to the type of arg 1 for recvfrom. */
|
||||
#define RECVFROM_TYPE_ARG1 int
|
||||
|
||||
/* Define to the type pointed by arg 2 for recvfrom. */
|
||||
#define RECVFROM_TYPE_ARG2 void
|
||||
|
||||
/* Define to 1 if the type pointed by arg 2 for recvfrom is void. */
|
||||
#define RECVFROM_TYPE_ARG2_IS_VOID 1
|
||||
|
||||
/* Define to the type of arg 3 for recvfrom. */
|
||||
#define RECVFROM_TYPE_ARG3 size_t
|
||||
|
||||
/* Define to the type of arg 4 for recvfrom. */
|
||||
#define RECVFROM_TYPE_ARG4 int
|
||||
|
||||
/* Define to the type pointed by arg 5 for recvfrom. */
|
||||
#define RECVFROM_TYPE_ARG5 struct sockaddr
|
||||
|
||||
/* Define to 1 if the type pointed by arg 5 for recvfrom is void. */
|
||||
/* #undef RECVFROM_TYPE_ARG5_IS_VOID */
|
||||
|
||||
/* Define to the type pointed by arg 6 for recvfrom. */
|
||||
#define RECVFROM_TYPE_ARG6 void
|
||||
|
||||
/* Define to 1 if the type pointed by arg 6 for recvfrom is void. */
|
||||
#define RECVFROM_TYPE_ARG6_IS_VOID 1
|
||||
|
||||
/* Define to the function return type for recvfrom. */
|
||||
#define RECVFROM_TYPE_RETV ssize_t
|
||||
|
||||
/* Define to the type of arg 1 for recv. */
|
||||
#define RECV_TYPE_ARG1 int
|
||||
|
||||
/* Define to the type of arg 2 for recv. */
|
||||
#define RECV_TYPE_ARG2 void *
|
||||
|
||||
/* Define to the type of arg 3 for recv. */
|
||||
#define RECV_TYPE_ARG3 size_t
|
||||
|
||||
/* Define to the type of arg 4 for recv. */
|
||||
#define RECV_TYPE_ARG4 int
|
||||
|
||||
/* Define to the function return type for recv. */
|
||||
#define RECV_TYPE_RETV ssize_t
|
||||
|
||||
/* Define as the return type of signal handlers (`int' or `void'). */
|
||||
#define RETSIGTYPE void
|
||||
|
||||
/* Define to the type qualifier of arg 2 for send. */
|
||||
#define SEND_QUAL_ARG2 const
|
||||
|
||||
/* Define to the type of arg 1 for send. */
|
||||
#define SEND_TYPE_ARG1 int
|
||||
|
||||
/* Define to the type of arg 2 for send. */
|
||||
#define SEND_TYPE_ARG2 void *
|
||||
|
||||
/* Define to the type of arg 3 for send. */
|
||||
#define SEND_TYPE_ARG3 size_t
|
||||
|
||||
/* Define to the type of arg 4 for send. */
|
||||
#define SEND_TYPE_ARG4 int
|
||||
|
||||
/* Define to the function return type for send. */
|
||||
#define SEND_TYPE_RETV ssize_t
|
||||
|
||||
/* Define to 1 if all of the C90 standard headers exist (not just the ones
|
||||
required in a freestanding environment). This macro is provided for
|
||||
backward compatibility; new code need not use it. */
|
||||
#define STDC_HEADERS 1
|
||||
|
||||
/* Define to 1 if you can safely include both <sys/time.h> and <time.h>. This
|
||||
macro is obsolete. */
|
||||
#define TIME_WITH_SYS_TIME 1
|
||||
|
||||
/* Define to disable non-blocking sockets. */
|
||||
/* #undef USE_BLOCKING_SOCKETS */
|
||||
|
||||
/* Version number of package */
|
||||
#define VERSION "1.18.1"
|
||||
|
||||
/* Define to avoid automatic inclusion of winsock.h */
|
||||
/* #undef WIN32_LEAN_AND_MEAN */
|
||||
|
||||
/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
|
||||
significant byte first (like Motorola and SPARC, unlike Intel). */
|
||||
#if defined AC_APPLE_UNIVERSAL_BUILD
|
||||
# if defined __BIG_ENDIAN__
|
||||
# define WORDS_BIGENDIAN 1
|
||||
# endif
|
||||
#else
|
||||
# ifndef WORDS_BIGENDIAN
|
||||
/* # undef WORDS_BIGENDIAN */
|
||||
# endif
|
||||
#endif
|
||||
|
||||
/* Define to 1 if OS is AIX. */
|
||||
#ifndef _ALL_SOURCE
|
||||
/* # undef _ALL_SOURCE */
|
||||
#endif
|
||||
|
||||
/* Number of bits in a file offset, on hosts where this is settable. */
|
||||
/* #undef _FILE_OFFSET_BITS */
|
||||
|
||||
/* Define for large files, on AIX-style hosts. */
|
||||
/* #undef _LARGE_FILES */
|
||||
|
||||
/* Define to empty if `const' does not conform to ANSI C. */
|
||||
/* #undef const */
|
||||
|
||||
/* Type to use in place of in_addr_t when system does not provide it. */
|
||||
/* #undef in_addr_t */
|
||||
|
||||
/* Define to `unsigned int' if <sys/types.h> does not define. */
|
||||
/* #undef size_t */
|
2
contrib/cppkafka
vendored
@ -1 +1 @@
Subproject commit 5a119f689f8a4d90d10a9635e7ee2bee5c127de1
Subproject commit 9c5ea0e332486961e612deacc6e3f0c1874c688d
@ -51,3 +51,8 @@
#define USE_OPENSSL
#define USE_THREADS_POSIX
#define USE_ARES

#ifdef __illumos__
#define HAVE_POSIX_STRERROR_R 1
#define HAVE_STRERROR_R 1
#endif
@ -26,13 +26,13 @@ const uint8_t MetroHash64::test_seed_1[8] = { 0x3B, 0x0D, 0x48, 0x1C, 0xF4, 0x


MetroHash64::MetroHash64(const uint64_t seed)
MetroHash64::MetroHash64(uint64_t seed)
{
    Initialize(seed);
}


void MetroHash64::Initialize(const uint64_t seed)
void MetroHash64::Initialize(uint64_t seed)
{
    vseed = (static_cast<uint64_t>(seed) + k2) * k0;

@ -47,7 +47,7 @@ void MetroHash64::Initialize(const uint64_t seed)
}


void MetroHash64::Update(const uint8_t * const buffer, const uint64_t length)
void MetroHash64::Update(const uint8_t * const buffer, uint64_t length)
{
    const uint8_t * ptr = reinterpret_cast<const uint8_t*>(buffer);
    const uint8_t * const end = ptr + length;
@ -62,7 +62,7 @@ void MetroHash64::Update(const uint8_t * const buffer, const uint64_t length)
        memcpy(input.b + (bytes % 32), ptr, static_cast<size_t>(fill));
        ptr += fill;
        bytes += fill;

        // input buffer is still partially filled
        if ((bytes % 32) != 0) return;

@ -72,7 +72,7 @@ void MetroHash64::Update(const uint8_t * const buffer, const uint64_t length)
        state.v[2] += read_u64(&input.b[16]) * k2; state.v[2] = rotate_right(state.v[2],29) + state.v[0];
        state.v[3] += read_u64(&input.b[24]) * k3; state.v[3] = rotate_right(state.v[3],29) + state.v[1];
    }

    // bulk update
    bytes += static_cast<uint64_t>(end - ptr);
    while (ptr <= (end - 32))
@ -83,14 +83,14 @@ void MetroHash64::Update(const uint8_t * const buffer, const uint64_t length)
        state.v[2] += read_u64(ptr) * k2; ptr += 8; state.v[2] = rotate_right(state.v[2],29) + state.v[0];
        state.v[3] += read_u64(ptr) * k3; ptr += 8; state.v[3] = rotate_right(state.v[3],29) + state.v[1];
    }

    // store remaining bytes in input buffer
    if (ptr < end)
        memcpy(input.b, ptr, static_cast<size_t>(end - ptr));
}


void MetroHash64::Finalize(uint8_t * const hash)
void MetroHash64::Finalize(uint8_t * hash)
{
    // finalize bulk loop, if used
    if (bytes >= 32)
@ -102,11 +102,11 @@ void MetroHash64::Finalize(uint8_t * const hash)

        state.v[0] = vseed + (state.v[0] ^ state.v[1]);
    }

    // process any bytes remaining in the input buffer
    const uint8_t * ptr = reinterpret_cast<const uint8_t*>(input.b);
    const uint8_t * const end = ptr + (bytes % 32);

    if ((end - ptr) >= 16)
    {
        state.v[1] = state.v[0] + (read_u64(ptr) * k2); ptr += 8; state.v[1] = rotate_right(state.v[1],29) * k3;
@ -139,7 +139,7 @@ void MetroHash64::Finalize(uint8_t * const hash)
        state.v[0] += read_u8 (ptr) * k3;
        state.v[0] ^= rotate_right(state.v[0], 37) * k1;
    }

    state.v[0] ^= rotate_right(state.v[0], 28);
    state.v[0] *= k0;
    state.v[0] ^= rotate_right(state.v[0], 29);
@ -152,7 +152,7 @@ void MetroHash64::Finalize(uint8_t * const hash)
}


void MetroHash64::Hash(const uint8_t * buffer, const uint64_t length, uint8_t * const hash, const uint64_t seed)
void MetroHash64::Hash(const uint8_t * buffer, uint64_t length, uint8_t * const hash, uint64_t seed)
{
    const uint8_t * ptr = reinterpret_cast<const uint8_t*>(buffer);
    const uint8_t * const end = ptr + length;
@ -238,7 +238,7 @@ bool MetroHash64::ImplementationVerified()

    // verify incremental implementation
    MetroHash64 metro;

    metro.Initialize(0);
    metro.Update(reinterpret_cast<const uint8_t *>(MetroHash64::test_string), strlen(MetroHash64::test_string));
    metro.Finalize(hash);
@ -262,9 +262,9 @@ void metrohash64_1(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * o

    const uint8_t * ptr = reinterpret_cast<const uint8_t*>(key);
    const uint8_t * const end = ptr + len;

    uint64_t hash = ((static_cast<uint64_t>(seed) + k2) * k0) + len;

    if (len >= 32)
    {
        uint64_t v[4];
@ -272,7 +272,7 @@ void metrohash64_1(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * o
        v[1] = hash;
        v[2] = hash;
        v[3] = hash;

        do
        {
            v[0] += read_u64(ptr) * k0; ptr += 8; v[0] = rotate_right(v[0],29) + v[2];
@ -288,7 +288,7 @@ void metrohash64_1(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * o
        v[1] ^= rotate_right(((v[1] + v[3]) * k1) + v[2], 33) * k0;
        hash += v[0] ^ v[1];
    }

    if ((end - ptr) >= 16)
    {
        uint64_t v0 = hash + (read_u64(ptr) * k0); ptr += 8; v0 = rotate_right(v0,33) * k1;
@ -297,32 +297,32 @@ void metrohash64_1(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * o
        v1 ^= rotate_right(v1 * k3, 35) + v0;
        hash += v1;
    }

    if ((end - ptr) >= 8)
    {
        hash += read_u64(ptr) * k3; ptr += 8;
        hash ^= rotate_right(hash, 33) * k1;
    }

    if ((end - ptr) >= 4)
    {
        hash += read_u32(ptr) * k3; ptr += 4;
        hash ^= rotate_right(hash, 15) * k1;
    }

    if ((end - ptr) >= 2)
    {
        hash += read_u16(ptr) * k3; ptr += 2;
        hash ^= rotate_right(hash, 13) * k1;
    }

    if ((end - ptr) >= 1)
    {
        hash += read_u8 (ptr) * k3;
        hash ^= rotate_right(hash, 25) * k1;
    }

    hash ^= rotate_right(hash, 33);
    hash *= k0;
    hash ^= rotate_right(hash, 33);
@ -336,13 +336,13 @@ void metrohash64_2(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * o
    static const uint64_t k0 = 0xD6D018F5;
    static const uint64_t k1 = 0xA2AA033B;
    static const uint64_t k2 = 0x62992FC1;
    static const uint64_t k3 = 0x30BC5B29;
    static const uint64_t k3 = 0x30BC5B29;

    const uint8_t * ptr = reinterpret_cast<const uint8_t*>(key);
    const uint8_t * const end = ptr + len;

    uint64_t hash = ((static_cast<uint64_t>(seed) + k2) * k0) + len;

    if (len >= 32)
    {
        uint64_t v[4];
@ -350,7 +350,7 @@ void metrohash64_2(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * o
        v[1] = hash;
        v[2] = hash;
        v[3] = hash;

        do
        {
            v[0] += read_u64(ptr) * k0; ptr += 8; v[0] = rotate_right(v[0],29) + v[2];
@ -366,7 +366,7 @@ void metrohash64_2(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * o
        v[1] ^= rotate_right(((v[1] + v[3]) * k1) + v[2], 30) * k0;
        hash += v[0] ^ v[1];
    }

    if ((end - ptr) >= 16)
    {
        uint64_t v0 = hash + (read_u64(ptr) * k2); ptr += 8; v0 = rotate_right(v0,29) * k3;
@ -375,31 +375,31 @@ void metrohash64_2(const uint8_t * key, uint64_t len, uint32_t seed, uint8_t * o
        v1 ^= rotate_right(v1 * k3, 34) + v0;
        hash += v1;
    }

    if ((end - ptr) >= 8)
    {
        hash += read_u64(ptr) * k3; ptr += 8;
        hash ^= rotate_right(hash, 36) * k1;
    }

    if ((end - ptr) >= 4)
    {
        hash += read_u32(ptr) * k3; ptr += 4;
        hash ^= rotate_right(hash, 15) * k1;
    }

    if ((end - ptr) >= 2)
    {
        hash += read_u16(ptr) * k3; ptr += 2;
        hash ^= rotate_right(hash, 15) * k1;
    }

    if ((end - ptr) >= 1)
    {
        hash += read_u8 (ptr) * k3;
        hash ^= rotate_right(hash, 23) * k1;
    }

    hash ^= rotate_right(hash, 28);
    hash *= k0;
    hash ^= rotate_right(hash, 29);
@ -25,24 +25,24 @@ public:
    static const uint32_t bits = 64;

    // Constructor initializes the same as Initialize()
    explicit MetroHash64(const uint64_t seed=0);
    explicit MetroHash64(uint64_t seed=0);

    // Initializes internal state for new hash with optional seed
    void Initialize(const uint64_t seed=0);
    void Initialize(uint64_t seed=0);

    // Update the hash state with a string of bytes. If the length
    // is sufficiently long, the implementation switches to a bulk
    // hashing algorithm directly on the argument buffer for speed.
    void Update(const uint8_t * buffer, const uint64_t length);
    void Update(const uint8_t * buffer, uint64_t length);

    // Constructs the final hash and writes it to the argument buffer.
    // After a hash is finalized, this instance must be Initialized()-ed
    // again or the behavior of Update() and Finalize() is undefined.
    void Finalize(uint8_t * const hash);
    void Finalize(uint8_t * hash);

    // A non-incremental function implementation. This can be significantly
    // faster than the incremental implementation for some usage patterns.
    static void Hash(const uint8_t * buffer, const uint64_t length, uint8_t * const hash, const uint64_t seed=0);
    static void Hash(const uint8_t * buffer, uint64_t length, uint8_t * hash, uint64_t seed=0);

    // Does implementation correctly execute test vectors?
    static bool ImplementationVerified();
@ -61,7 +61,7 @@ RUN arch=${TARGETARCH:-amd64} \
    && rm /tmp/nfpm.deb

ARG GO_VERSION=1.19.10
# We need go for clickhouse-diagnostics
# We needed go for clickhouse-diagnostics (it is not used anymore)
RUN arch=${TARGETARCH:-amd64} \
    && curl -Lo /tmp/go.tgz "https://go.dev/dl/go${GO_VERSION}.linux-${arch}.tar.gz" \
    && tar -xzf /tmp/go.tgz -C /usr/local/ \
@ -36,22 +36,6 @@ rm -f CMakeCache.txt

if [ -n "$MAKE_DEB" ]; then
    rm -rf /build/packages/root
    # NOTE: this is for backward compatibility with previous releases,
    # that does not diagnostics tool (only script).
    if [ -d /build/programs/diagnostics ]; then
        if [ -z "$SANITIZER" ]; then
            # We need to check if clickhouse-diagnostics is fine and build it
            (
                cd /build/programs/diagnostics
                make test-no-docker
                GOARCH="${DEB_ARCH}" CGO_ENABLED=0 make VERSION="$VERSION_STRING" build
                mv clickhouse-diagnostics ..
            )
        else
            echo -e "#!/bin/sh\necho 'Not implemented for this type of package'" > /build/programs/clickhouse-diagnostics
            chmod +x /build/programs/clickhouse-diagnostics
        fi
    fi
fi

@ -121,8 +105,6 @@ if [ -n "$MAKE_DEB" ]; then
    # No quotes because I want it to expand to nothing if empty.
    # shellcheck disable=SC2086
    DESTDIR=/build/packages/root ninja $NINJA_FLAGS programs/install
    cp /build/programs/clickhouse-diagnostics /build/packages/root/usr/bin
    cp /build/programs/clickhouse-diagnostics /output
    bash -x /build/packages/build
fi
@ -173,9 +173,23 @@ function fuzz

    mkdir -p /var/run/clickhouse-server

    # NOTE: we use process substitution here to preserve keep $! as a pid of clickhouse-server
    clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db > server.log 2>&1 &
    server_pid=$!
    # server.log -> All server logs, including sanitizer
    # stderr.log -> Process logs (sanitizer) only
    clickhouse-server \
        --config-file db/config.xml \
        --pid-file /var/run/clickhouse-server/clickhouse-server.pid \
        -- --path db \
        --logger.console=0 \
        --logger.log=server.log 2>&1 | tee -a stderr.log >> server.log 2>&1 &
    for _ in {1..30}
    do
        if clickhouse-client --query "select 1"
        then
            break
        fi
        sleep 1
    done
    server_pid=$(cat /var/run/clickhouse-server/clickhouse-server.pid)

    kill -0 $server_pid

@ -427,6 +441,7 @@ p.links a { padding: 5px; margin: 3px; background: #FFF; line-height: 2; white-s
<a href="run.log">run.log</a>
<a href="fuzzer.log.zst">fuzzer.log.zst</a>
<a href="server.log.zst">server.log.zst</a>
<a href="stderr.log">stderr.log</a>
<a href="main.log">main.log</a>
<a href="dmesg.log">dmesg.log</a>
${CORE_LINK}
@ -126,7 +126,6 @@ RUN set -x \

COPY modprobe.sh /usr/local/bin/modprobe
COPY dockerd-entrypoint.sh /usr/local/bin/
COPY compose/ /compose/
COPY misc/ /misc/

@ -51,22 +51,22 @@
config_logs_export_cluster /etc/clickhouse-server/config.d/system_logs_export.yaml

if [[ -n "$BUGFIX_VALIDATE_CHECK" ]] && [[ "$BUGFIX_VALIDATE_CHECK" -eq 1 ]]; then
    sudo cat /etc/clickhouse-server/config.d/zookeeper.xml \
        | sed "/<use_compression>1<\/use_compression>/d" \
        > /etc/clickhouse-server/config.d/zookeeper.xml.tmp
    sudo mv /etc/clickhouse-server/config.d/zookeeper.xml.tmp /etc/clickhouse-server/config.d/zookeeper.xml
    sudo sed -i "/<use_compression>1<\/use_compression>/d" /etc/clickhouse-server/config.d/zookeeper.xml

    # it contains some new settings, but we can safely remove it
    rm /etc/clickhouse-server/config.d/handlers.yaml
    rm /etc/clickhouse-server/users.d/s3_cache_new.xml
    rm /etc/clickhouse-server/config.d/zero_copy_destructive_operations.xml

    #todo: remove these after 24.3 released.
    sudo sed -i "s|<object_storage_type>azure<|<object_storage_type>azure_blob_storage<|" /etc/clickhouse-server/config.d/azure_storage_conf.xml

    #todo: remove these after 24.3 released.
    sudo sed -i "s|<object_storage_type>local<|<object_storage_type>local_blob_storage<|" /etc/clickhouse-server/config.d/storage_conf.xml

    function remove_keeper_config()
    {
        sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
            | sed "/<$1>$2<\/$1>/d" \
            > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
        sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
        sudo sed -i "/<$1>$2<\/$1>/d" /etc/clickhouse-server/config.d/keeper_port.xml
    }
    # commit_logs_cache_size_threshold setting doesn't exist on some older versions
    remove_keeper_config "commit_logs_cache_size_threshold" "[[:digit:]]\+"
@ -77,7 +77,7 @@ fi
if [ "$NUM_TRIES" -gt "1" ]; then
    export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
    export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
    export THREAD_FUZZER_SLEEP_TIME_US=100000
    export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000

    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
@ -88,10 +88,10 @@ if [ "$NUM_TRIES" -gt "1" ]; then
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
    export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000
    export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
    export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX=10000
    export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000

    mkdir -p /var/run/clickhouse-server
    # simplest way to forward env variables to server
@ -101,25 +101,13 @@ else
fi

if [[ -n "$USE_DATABASE_REPLICATED" ]] && [[ "$USE_DATABASE_REPLICATED" -eq 1 ]]; then
    sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
        | sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" \
        > /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
    sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_1/</filesystem_caches_path>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml

    sudo cat /etc/clickhouse-server2/config.d/filesystem_caches_path.xml \
        | sed "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_2/</filesystem_caches_path>|" \
        > /etc/clickhouse-server2/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server2/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server2/config.d/filesystem_caches_path.xml
    sudo sed -i "s|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches/</filesystem_caches_path>|<filesystem_caches_path>/var/lib/clickhouse/filesystem_caches_2/</filesystem_caches_path>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml

    sudo cat /etc/clickhouse-server1/config.d/filesystem_caches_path.xml \
        | sed "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_1/</custom_cached_disks_base_directory>|" \
        > /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server1/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server1/config.d/filesystem_caches_path.xml
    sudo sed -i "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_1/</custom_cached_disks_base_directory>|" /etc/clickhouse-server1/config.d/filesystem_caches_path.xml

    sudo cat /etc/clickhouse-server2/config.d/filesystem_caches_path.xml \
        | sed "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_2/</custom_cached_disks_base_directory>|" \
        > /etc/clickhouse-server2/config.d/filesystem_caches_path.xml.tmp
    mv /etc/clickhouse-server2/config.d/filesystem_caches_path.xml.tmp /etc/clickhouse-server2/config.d/filesystem_caches_path.xml
    sudo sed -i "s|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches/</custom_cached_disks_base_directory>|<custom_cached_disks_base_directory replace=\"replace\">/var/lib/clickhouse/filesystem_caches_2/</custom_cached_disks_base_directory>|" /etc/clickhouse-server2/config.d/filesystem_caches_path.xml

    mkdir -p /var/run/clickhouse-server1
    sudo chown clickhouse:clickhouse /var/run/clickhouse-server1
@ -215,7 +215,7 @@ function check_server_start()
function check_logs_for_critical_errors()
{
    # Sanitizer asserts
    sed -n '/WARNING:.*anitizer/,/^$/p' >> /test_output/tmp
    sed -n '/WARNING:.*anitizer/,/^$/p' /var/log/clickhouse-server/stderr.log >> /test_output/tmp
    rg -Fav -e "ASan doesn't fully support makecontext/swapcontext functions" -e "DB::Exception" /test_output/tmp > /dev/null \
        && echo -e "Sanitizer assert (in stderr.log)$FAIL$(head_escaped /test_output/tmp)" >> /test_output/test_results.tsv \
        || echo -e "No sanitizer asserts$OK" >> /test_output/test_results.tsv
@ -27,7 +27,7 @@ install_packages package_folder
# and find more potential issues.
export THREAD_FUZZER_CPU_TIME_PERIOD_US=1000
export THREAD_FUZZER_SLEEP_PROBABILITY=0.1
export THREAD_FUZZER_SLEEP_TIME_US=100000
export THREAD_FUZZER_SLEEP_TIME_US_MAX=100000

export THREAD_FUZZER_pthread_mutex_lock_BEFORE_MIGRATE_PROBABILITY=1
export THREAD_FUZZER_pthread_mutex_lock_AFTER_MIGRATE_PROBABILITY=1
@ -38,11 +38,11 @@ export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_PROBABILITY=0.001
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_lock_BEFORE_SLEEP_TIME_US_MAX=10000

export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US=10000
export THREAD_FUZZER_pthread_mutex_lock_AFTER_SLEEP_TIME_US_MAX=10000
export THREAD_FUZZER_pthread_mutex_unlock_BEFORE_SLEEP_TIME_US_MAX=10000
export THREAD_FUZZER_pthread_mutex_unlock_AFTER_SLEEP_TIME_US_MAX=10000

export THREAD_FUZZER_EXPLICIT_SLEEP_PROBABILITY=0.01
export THREAD_FUZZER_EXPLICIT_MEMORY_EXCEPTION_PROBABILITY=0.01
@ -8,20 +8,22 @@ ARG apt_archive="http://archive.ubuntu.com"
RUN sed -i "s|http://archive.ubuntu.com|$apt_archive|g" /etc/apt/sources.list

RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
    aspell \
    curl \
    git \
    file \
    libxml2-utils \
    moreutils \
    python3-fuzzywuzzy \
    python3-pip \
    yamllint \
    locales \
    && pip3 install black==23.12.0 boto3 codespell==2.2.1 mypy==1.8.0 PyGithub unidiff pylint==3.1.0 \
        requests types-requests \
    aspell \
    curl \
    git \
    file \
    libxml2-utils \
    moreutils \
    python3-fuzzywuzzy \
    python3-pip \
    yamllint \
    locales \
    && apt-get clean \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/* \
    && rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

# python-magic is the same version as in Ubuntu 22.04
RUN pip3 install black==23.12.0 boto3 codespell==2.2.1 mypy==1.8.0 PyGithub unidiff pylint==3.1.0 \
    python-magic==0.4.24 requests types-requests \
    && rm -rf /root/.cache/pip

RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen && locale-gen en_US.UTF-8
@ -67,10 +67,7 @@ configure

function remove_keeper_config()
{
    sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
        | sed "/<$1>$2<\/$1>/d" \
        > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
    sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
    sudo sed -i "/<$1>$2<\/$1>/d" /etc/clickhouse-server/config.d/keeper_port.xml
}

# async_replication setting doesn't exist on some older versions
@ -80,16 +77,10 @@ remove_keeper_config "async_replication" "1"
remove_keeper_config "create_if_not_exists" "[01]"

#todo: remove these after 24.3 released.
sudo cat /etc/clickhouse-server/config.d/azure_storage_conf.xml \
    | sed "s|<object_storage_type>azure|<object_storage_type>azure_blob_storage|" \
    > /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp /etc/clickhouse-server/config.d/azure_storage_conf.xml
sudo sed -i "s|<object_storage_type>azure<|<object_storage_type>azure_blob_storage<|" /etc/clickhouse-server/config.d/azure_storage_conf.xml

#todo: remove these after 24.3 released.
sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
    | sed "s|<object_storage_type>local|<object_storage_type>local_blob_storage|" \
    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
sudo sed -i "s|<object_storage_type>local<|<object_storage_type>local_blob_storage<|" /etc/clickhouse-server/config.d/storage_conf.xml

# latest_logs_cache_size_threshold setting doesn't exist on some older versions
remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"
@ -120,22 +111,13 @@ export ZOOKEEPER_FAULT_INJECTION=0
configure

# force_sync=false doesn't work correctly on some older versions
sudo cat /etc/clickhouse-server/config.d/keeper_port.xml \
    | sed "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" \
    > /etc/clickhouse-server/config.d/keeper_port.xml.tmp
sudo mv /etc/clickhouse-server/config.d/keeper_port.xml.tmp /etc/clickhouse-server/config.d/keeper_port.xml
sudo sed -i "s|<force_sync>false</force_sync>|<force_sync>true</force_sync>|" /etc/clickhouse-server/config.d/keeper_port.xml

#todo: remove these after 24.3 released.
sudo cat /etc/clickhouse-server/config.d/azure_storage_conf.xml \
    | sed "s|<object_storage_type>azure|<object_storage_type>azure_blob_storage|" \
    > /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/azure_storage_conf.xml.tmp /etc/clickhouse-server/config.d/azure_storage_conf.xml
sudo sed -i "s|<object_storage_type>azure<|<object_storage_type>azure_blob_storage<|" /etc/clickhouse-server/config.d/azure_storage_conf.xml

#todo: remove these after 24.3 released.
sudo cat /etc/clickhouse-server/config.d/storage_conf.xml \
    | sed "s|<object_storage_type>local|<object_storage_type>local_blob_storage|" \
    > /etc/clickhouse-server/config.d/storage_conf.xml.tmp
sudo mv /etc/clickhouse-server/config.d/storage_conf.xml.tmp /etc/clickhouse-server/config.d/storage_conf.xml
sudo sed -i "s|<object_storage_type>local<|<object_storage_type>local_blob_storage<|" /etc/clickhouse-server/config.d/storage_conf.xml

# async_replication setting doesn't exist on some older versions
remove_keeper_config "async_replication" "1"
@ -150,10 +132,7 @@ remove_keeper_config "latest_logs_cache_size_threshold" "[[:digit:]]\+"
remove_keeper_config "commit_logs_cache_size_threshold" "[[:digit:]]\+"

# But we still need default disk because some tables loaded only into it
sudo cat /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml \
    | sed "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" \
    > /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp
mv /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml.tmp /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo sed -i "s|<main><disk>s3</disk></main>|<main><disk>s3</disk></main><default><disk>default</disk></default>|" /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chown clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml
sudo chgrp clickhouse /etc/clickhouse-server/config.d/s3_storage_policy_by_default.xml

@ -256,10 +235,7 @@ then
fi

# Just in case previous version left some garbage in zk
sudo cat /etc/clickhouse-server/config.d/lost_forever_check.xml \
    | sed "s|>1<|>0<|g" \
    > /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp
sudo mv /etc/clickhouse-server/config.d/lost_forever_check.xml.tmp /etc/clickhouse-server/config.d/lost_forever_check.xml
sudo sed -i "s|>1<|>0<|g" /etc/clickhouse-server/config.d/lost_forever_check.xml \
    rm /etc/clickhouse-server/config.d/filesystem_caches_path.xml

start 500
@ -4,7 +4,11 @@ sidebar_position: 50
sidebar_label: MySQL
---

# MySQL
import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge';

# MySQL Database Engine

<CloudNotSupportedBadge />

Allows connecting to databases on a remote MySQL server and performing `INSERT` and `SELECT` queries to exchange data between ClickHouse and MySQL.
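For orientation, a minimal sketch of creating such a database follows. This is the standard `MySQL` database engine syntax, but the host, database name, and credentials below are placeholder assumptions, not values from this commit:

```sql
-- Sketch: expose a remote MySQL database inside ClickHouse.
-- 'mysql-host:3306', 'test', 'my_user', and 'my_password' are placeholders.
CREATE DATABASE mysql_db
ENGINE = MySQL('mysql-host:3306', 'test', 'my_user', 'my_password')
```

Once created, tables of the remote `test` database can be queried as `mysql_db.<table>`.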
@ -4,7 +4,11 @@ sidebar_position: 138
sidebar_label: MySQL
---

# MySQL
import CloudAvailableBadge from '@theme/badges/CloudAvailableBadge';

# MySQL Table Engine

<CloudAvailableBadge />

The MySQL engine allows you to perform `SELECT` and `INSERT` queries on data that is stored on a remote MySQL server.
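As a brief sketch of what that looks like in practice (the column list must mirror the remote table; the host, table, and credentials below are placeholder assumptions):

```sql
-- Sketch: map a single remote MySQL table into ClickHouse.
CREATE TABLE mysql_table
(
    `id` UInt32,
    `name` String
)
ENGINE = MySQL('mysql-host:3306', 'test', 'remote_table', 'my_user', 'my_password')
```

`SELECT` and `INSERT` statements against `mysql_table` are then executed against the remote server.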
@ -8,6 +8,10 @@ sidebar_label: PostgreSQL

The PostgreSQL engine allows you to perform `SELECT` and `INSERT` queries on data that is stored on a remote PostgreSQL server.

:::note
Currently, only PostgreSQL versions 12 and up are supported.
:::

## Creating a Table {#creating-a-table}

``` sql
@ -75,14 +75,14 @@ This is the output of `DESCRIBE`. Down further in this guide the field type cho
</TabItem>
<TabItem value="selfmanaged" label="Self-managed">

1. Download the snapshot of the dataset from February 2021: [cell_towers.csv.xz](https://datasets.clickhouse.com/cell_towers.csv.xz) (729 MB).
1. Download the snapshot of the dataset from February 2021: [cell_towers.csv.xz](https://datasets.clickhouse.com/cell_towers.csv.xz) (686 MB).

2. Validate the integrity (optional step):
```bash
md5sum cell_towers.csv.xz
```
```response
8cf986f4a0d9f12c6f384a0e9192c908  cell_towers.csv.xz
8a797f7bdb55faba93f6cbc37d47b037  cell_towers.csv.xz
```

3. Decompress it with the following command:
@ -132,7 +132,7 @@ SELECT radio, count() AS c FROM cell_towers GROUP BY radio ORDER BY c DESC
┌─radio─┬────────c─┐
│ UMTS  │ 20686487 │
│ LTE   │ 12101148 │
│ GSM   │  9931312 │
│ GSM   │  9931304 │
│ CDMA  │   556344 │
│ NR    │      867 │
└───────┴──────────┘
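For readers re-checking the corrected counts, a proportional breakdown is a small extension of the query in this hunk (a sketch; it assumes only the `cell_towers` table and `radio` column used above):

```sql
-- Sketch: share of each radio type as a percentage of all cell towers.
SELECT
    radio,
    count() AS c,
    round(100 * count() / (SELECT count() FROM cell_towers), 2) AS pct
FROM cell_towers
GROUP BY radio
ORDER BY c DESC
```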
293
docs/en/getting-started/example-datasets/tw-weather.md
Normal file
@ -0,0 +1,293 @@
---
slug: /en/getting-started/example-datasets/tw-weather
sidebar_label: Taiwan Historical Weather Datasets
sidebar_position: 1
description: 131 million rows of weather observation data for the last 128 years
---

# Taiwan Historical Weather Datasets

This dataset contains historical meteorological observations for the last 128 years. Each row is a measurement for a point in time and a weather station.

The origin of this dataset is available [here](https://github.com/Raingel/historical_weather) and the list of weather station numbers can be found [here](https://github.com/Raingel/weather_station_list).

> The sources of the meteorological datasets include the meteorological stations established by the Central Weather Administration (station codes beginning with C0, C1, and 4) and the agricultural meteorological stations belonging to the Council of Agriculture (station codes other than those mentioned above):

- StationId
- MeasuredDate, the observation time
- StnPres, the station air pressure
- SeaPres, the sea level pressure
- Td, the dew point temperature
- RH, the relative humidity
- Other elements where available

## Downloading the data

- A [pre-processed version](#pre-processed-data) of the data for ClickHouse, which has been cleaned, re-structured, and enriched. This dataset covers the years from 1896 to 2023.
- [Download the original raw data](#original-raw-data) and convert it to the format required by ClickHouse. Users who want to add their own columns may wish to explore this approach.

### Pre-processed data

The dataset has also been re-structured from a measurement per line to a row per weather station ID and measured date, i.e.

```csv
StationId,MeasuredDate,StnPres,Tx,RH,WS,WD,WSGust,WDGust,Precp,GloblRad,TxSoil0cm,TxSoil5cm,TxSoil20cm,TxSoil50cm,TxSoil100cm,SeaPres,Td,PrecpHour,SunShine,TxSoil10cm,EvapA,Visb,UVI,Cloud Amount,TxSoil30cm,TxSoil200cm,TxSoil300cm,TxSoil500cm,VaporPressure
C0X100,2016-01-01 01:00:00,1022.1,16.1,72,1.1,8.0,,,,,,,,,,,,,,,,,,,,,,,
C0X100,2016-01-01 02:00:00,1021.6,16.0,73,1.2,358.0,,,,,,,,,,,,,,,,,,,,,,,
C0X100,2016-01-01 03:00:00,1021.3,15.8,74,1.5,353.0,,,,,,,,,,,,,,,,,,,,,,,
C0X100,2016-01-01 04:00:00,1021.2,15.8,74,1.7,8.0,,,,,,,,,,,,,,,,,,,,,,,
```

This makes the table easier to query and less sparse; some elements are null because they were not available for measurement at that weather station.

This dataset is available in the following Google Cloud Storage location. Either download the dataset to your local filesystem (and insert it with the ClickHouse client) or insert it directly into ClickHouse (see [Inserting from URL](#inserting-from-url)).

To download:

```bash
wget https://storage.googleapis.com/taiwan-weather-observaiton-datasets/preprocessed_weather_daily_1896_2023.tar.gz

# Option: Validate the checksum
md5sum preprocessed_weather_daily_1896_2023.tar.gz
# Checksum should be equal to: 11b484f5bd9ddafec5cfb131eb2dd008

tar -xzvf preprocessed_weather_daily_1896_2023.tar.gz
daily_weather_preprocessed_1896_2023.csv

# Option: Validate the checksum
md5sum daily_weather_preprocessed_1896_2023.csv
# Checksum should be equal to: 1132248c78195c43d93f843753881754
```

### Original raw data

The following steps describe how to download the original raw data, which you can then transform and convert as needed.

#### Download

To download the original raw data:

```bash
mkdir tw_raw_weather_data && cd tw_raw_weather_data

wget https://storage.googleapis.com/taiwan-weather-observaiton-datasets/raw_data_weather_daily_1896_2023.tar.gz

# Option: Validate the checksum
md5sum raw_data_weather_daily_1896_2023.tar.gz
# Checksum should be equal to: b66b9f137217454d655e3004d7d1b51a

tar -xzvf raw_data_weather_daily_1896_2023.tar.gz
466920_1928.csv
466920_1929.csv
466920_1930.csv
466920_1931.csv
...

# Option: Validate the checksum
cat *.csv | md5sum
# Checksum should be equal to: b26db404bf84d4063fac42e576464ce1
```

#### Retrieve the Taiwan weather stations

```bash
wget -O weather_sta_list.csv https://github.com/Raingel/weather_station_list/raw/main/data/weather_sta_list.csv

# Option: Convert the UTF-8-BOM to UTF-8 encoding
sed -i '1s/^\xEF\xBB\xBF//' weather_sta_list.csv
```

## Create table schema

Create the MergeTree table in ClickHouse (from the ClickHouse client).

```sql
CREATE TABLE tw_weather_data (
    StationId String null,
    MeasuredDate DateTime64,
    StnPres Float64 null,
    SeaPres Float64 null,
    Tx Float64 null,
    Td Float64 null,
    RH Float64 null,
    WS Float64 null,
    WD Float64 null,
    WSGust Float64 null,
    WDGust Float64 null,
    Precp Float64 null,
    PrecpHour Float64 null,
    SunShine Float64 null,
    GloblRad Float64 null,
    TxSoil0cm Float64 null,
    TxSoil5cm Float64 null,
    TxSoil10cm Float64 null,
    TxSoil20cm Float64 null,
    TxSoil50cm Float64 null,
    TxSoil100cm Float64 null,
    TxSoil30cm Float64 null,
    TxSoil200cm Float64 null,
    TxSoil300cm Float64 null,
    TxSoil500cm Float64 null,
    VaporPressure Float64 null,
    UVI Float64 null,
    "Cloud Amount" Float64 null,
    EvapA Float64 null,
    Visb Float64 null
)
ENGINE = MergeTree
ORDER BY (MeasuredDate);
```

## Inserting into ClickHouse

### Inserting from local file

Data can be inserted from a local file as follows (from the ClickHouse client):

```sql
INSERT INTO tw_weather_data FROM INFILE '/path/to/daily_weather_preprocessed_1896_2023.csv'
```

where `/path/to` represents the specific user path to the local file on the disk.

A sample response after inserting data into ClickHouse looks like this:

```response
Query id: 90e4b524-6e14-4855-817c-7e6f98fbeabb

Ok.
131985329 rows in set. Elapsed: 71.770 sec. Processed 131.99 million rows, 10.06 GB (1.84 million rows/s., 140.14 MB/s.)
Peak memory usage: 583.23 MiB.
```

### Inserting from URL

```sql
INSERT INTO tw_weather_data SELECT *
FROM url('https://storage.googleapis.com/taiwan-weather-observaiton-datasets/daily_weather_preprocessed_1896_2023.csv', 'CSVWithNames')
```

To learn how to speed this up, see our blog post on [tuning large data loads](https://clickhouse.com/blog/supercharge-your-clickhouse-data-loads-part2).
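As a rough, optional sketch of one such speed-up: `max_insert_threads` is a standard ClickHouse setting, but the value below is only an assumed starting point to tune for your hardware:

```sql
-- Sketch: parallelize the INSERT ... SELECT pipeline for this large load.
-- 8 threads is an assumption, not a measured recommendation.
INSERT INTO tw_weather_data
SELECT *
FROM url('https://storage.googleapis.com/taiwan-weather-observaiton-datasets/daily_weather_preprocessed_1896_2023.csv', 'CSVWithNames')
SETTINGS max_insert_threads = 8
```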
## Check data rows and sizes

1. Let's see how many rows were inserted:

```sql
SELECT formatReadableQuantity(count())
FROM tw_weather_data;
```

```response
┌─formatReadableQuantity(count())─┐
│ 131.99 million                  │
└─────────────────────────────────┘
```

2. Let's see how much disk space is used by this table:

```sql
SELECT
    formatReadableSize(sum(bytes)) AS disk_size,
    formatReadableSize(sum(data_uncompressed_bytes)) AS uncompressed_size
FROM system.parts
WHERE (`table` = 'tw_weather_data') AND active
```

```response
┌─disk_size─┬─uncompressed_size─┐
│ 2.13 GiB  │ 32.94 GiB         │
└───────────┴───────────────────┘
```
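A natural follow-up (a sketch reusing the same `system.parts` filter as above) is to compute the compression ratio explicitly:

```sql
-- Sketch: compression ratio of the active parts of tw_weather_data.
SELECT
    round(sum(data_uncompressed_bytes) / sum(bytes), 2) AS compression_ratio
FROM system.parts
WHERE (`table` = 'tw_weather_data') AND active
```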
## Sample queries
|
||||
|
||||
### Q1: Retrieve the highest dew point temperature for each weather station in the specific year
|
||||
|
||||
```sql
|
||||
SELECT
|
||||
StationId,
|
||||
max(Td) AS max_td
|
||||
FROM tw_weather_data
|
||||
WHERE (year(MeasuredDate) = 2023) AND (Td IS NOT NULL)
|
||||
GROUP BY StationId
|
||||
|
||||
┌─StationId─┬─max_td─┐
|
||||
│ 466940 │ 1 │
|
||||
│ 467300 │ 1 │
|
||||
│ 467540 │ 1 │
|
||||
│ 467490 │ 1 │
|
||||
│ 467080 │ 1 │
|
||||
│ 466910 │ 1 │
|
||||
│ 467660 │ 1 │
|
||||
│ 467270 │ 1 │
|
||||
│ 467350 │ 1 │
|
||||
│ 467571 │ 1 │
|
||||
│ 466920 │ 1 │
|
||||
│ 467650 │ 1 │
|
||||
│ 467550 │ 1 │
|
||||
│ 467480 │ 1 │
|
||||
│ 467610 │ 1 │
|
||||
│ 467050 │ 1 │
|
||||
│ 467590 │ 1 │
|
||||
│ 466990 │ 1 │
|
||||
│ 467060 │ 1 │
|
||||
│ 466950 │ 1 │
|
||||
│ 467620 │ 1 │
|
||||
│ 467990 │ 1 │
|
||||
│ 466930 │ 1 │
|
||||
│ 467110 │ 1 │
|
||||
│ 466881 │ 1 │
|
||||
│ 467410 │ 1 │
|
||||
│ 467441 │ 1 │
|
||||
│ 467420 │ 1 │
|
||||
│ 467530 │ 1 │
|
||||
│ 466900 │ 1 │
|
||||
└───────────┴────────┘
|
||||
|
||||
30 rows in set. Elapsed: 0.045 sec. Processed 6.41 million rows, 187.33 MB (143.92 million rows/s., 4.21 GB/s.)
|
||||
```

### Q2: Fetch raw data for a specific time range, set of fields, and weather station

```sql
SELECT
    StnPres,
    SeaPres,
    Tx,
    Td,
    RH,
    WS,
    WD,
    WSGust,
    WDGust,
    Precp,
    PrecpHour
FROM tw_weather_data
WHERE (StationId = 'C0UB10') AND (MeasuredDate >= '2023-12-23') AND (MeasuredDate < '2023-12-24')
ORDER BY MeasuredDate ASC
LIMIT 10
```

```response
┌─StnPres─┬─SeaPres─┬───Tx─┬───Td─┬─RH─┬──WS─┬──WD─┬─WSGust─┬─WDGust─┬─Precp─┬─PrecpHour─┐
│  1029.5 │    ᴺᵁᴸᴸ │ 11.8 │ ᴺᵁᴸᴸ │ 78 │ 2.7 │ 271 │    5.5 │    275 │ -99.8 │     -99.8 │
│  1029.8 │    ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 78 │ 2.7 │ 289 │    5.5 │    308 │ -99.8 │     -99.8 │
│  1028.6 │    ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 79 │ 2.3 │ 251 │    6.1 │    289 │ -99.8 │     -99.8 │
│  1028.2 │    ᴺᵁᴸᴸ │   13 │ ᴺᵁᴸᴸ │ 75 │ 4.3 │ 312 │    7.5 │    316 │ -99.8 │     -99.8 │
│  1027.8 │    ᴺᵁᴸᴸ │ 11.1 │ ᴺᵁᴸᴸ │ 89 │ 7.1 │ 310 │   11.6 │    322 │ -99.8 │     -99.8 │
│  1027.8 │    ᴺᵁᴸᴸ │ 11.6 │ ᴺᵁᴸᴸ │ 90 │ 3.1 │ 269 │   10.7 │    295 │ -99.8 │     -99.8 │
│  1027.9 │    ᴺᵁᴸᴸ │ 12.3 │ ᴺᵁᴸᴸ │ 89 │ 4.7 │ 296 │    8.1 │    310 │ -99.8 │     -99.8 │
│  1028.2 │    ᴺᵁᴸᴸ │ 12.2 │ ᴺᵁᴸᴸ │ 94 │ 2.5 │ 246 │    7.1 │    283 │ -99.8 │     -99.8 │
│  1028.4 │    ᴺᵁᴸᴸ │ 12.5 │ ᴺᵁᴸᴸ │ 94 │ 3.1 │ 265 │    4.8 │    297 │ -99.8 │     -99.8 │
│  1028.3 │    ᴺᵁᴸᴸ │ 13.6 │ ᴺᵁᴸᴸ │ 91 │ 1.2 │ 273 │    4.4 │    256 │ -99.8 │     -99.8 │
└─────────┴─────────┴──────┴──────┴────┴─────┴─────┴────────┴────────┴───────┴───────────┘

10 rows in set. Elapsed: 0.009 sec. Processed 91.70 thousand rows, 2.33 MB (9.67 million rows/s., 245.31 MB/s.)
```

## Credits

We would like to acknowledge the efforts of the Central Weather Administration and Agricultural Meteorological Observation Network (Station) of the Council of Agriculture for preparing, cleaning, and distributing this dataset. We appreciate their efforts.

Ou, J.-H., Kuo, C.-H., Wu, Y.-F., Lin, G.-C., Lee, M.-H., Chen, R.-K., Chou, H.-P., Wu, H.-Y., Chu, S.-C., Lai, Q.-J., Tsai, Y.-C., Lin, C.-C., Kuo, C.-C., Liao, C.-T., Chen, Y.-N., Chu, Y.-W., Chen, C.-Y., 2023. Application-oriented deep learning model for early warning of rice blast in Taiwan. Ecological Informatics 73, 101950. https://doi.org/10.1016/j.ecoinf.2022.101950 [13/12/2022]

@ -262,7 +262,7 @@ The required version can be downloaded with `curl` or `wget` from repository htt
After that, the downloaded archives should be unpacked and installed with the installation scripts. Example for the latest stable version:

``` bash
LATEST_VERSION=$(curl -s https://packages.clickhouse.com/tgz/stable/ | \
LATEST_VERSION=$(curl -s https://raw.githubusercontent.com/ClickHouse/ClickHouse/master/utils/list-versions/version_date.tsv | \
    grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | sort -V -r | head -n 1)
export LATEST_VERSION

@ -178,7 +178,7 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
- `--password` – The password. Default value: empty string.
- `--ask-password` - Prompt the user to enter a password.
- `--query, -q` – The query to process when using non-interactive mode. `--query` can be specified multiple times, e.g. `--query "SELECT 1" --query "SELECT 2"`. Cannot be used simultaneously with `--queries-file`.
- `--queries-file` – file path with queries to execute. `--queries-file` can be specified multiple times, e.g. `--query queries1.sql --query queries2.sql`. Cannot be used simultaneously with `--query`.
- `--queries-file` – file path with queries to execute. `--queries-file` can be specified multiple times, e.g. `--queries-file queries1.sql --queries-file queries2.sql`. Cannot be used simultaneously with `--query`.
- `--multiquery, -n` – If specified, multiple queries separated by semicolons can be listed after the `--query` option. For convenience, it is also possible to omit `--query` and pass the queries directly after `--multiquery`.
- `--multiline, -m` – If specified, allow multiline queries (do not send the query on Enter).
- `--database, -d` – Select the current default database. Default value: the current database from the server settings (‘default’ by default).

@ -379,6 +379,18 @@ Type: UInt64

Default: 0

## max_waiting_queries

Limit on the total number of concurrently waiting queries. Execution of a waiting query is blocked while required tables are loading asynchronously (see `async_load_databases`). Note that waiting queries are not counted when the `max_concurrent_queries`, `max_concurrent_insert_queries`, `max_concurrent_select_queries`, `max_concurrent_queries_for_user` and `max_concurrent_queries_for_all_users` limits are checked. This correction is done to avoid hitting these limits just after server startup. Zero means unlimited.

:::note
This setting can be modified at runtime and will take effect immediately. Queries that are already running will remain unchanged.
:::

Type: UInt64

Default: 0

## max_connections

Max server connections.
@ -933,9 +945,9 @@ Hard limit is configured via system tools

## database_atomic_delay_before_drop_table_sec {#database_atomic_delay_before_drop_table_sec}

The delay before table data is dropped, in seconds. If the `DROP TABLE` query has a `SYNC` modifier, this setting is ignored.
Sets the delay before remove table data in seconds. If the query has `SYNC` modifier, this setting is ignored.

Default value: `480` (8 minutes).
Default value: `480` (8 minute).
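
To make the `SYNC` behaviour concrete, here is a minimal sketch (assuming a throwaway table `t`):

```sql
CREATE TABLE t (id UInt8) ENGINE = MergeTree ORDER BY id;

DROP TABLE t;          -- data removal is deferred by database_atomic_delay_before_drop_table_sec
-- DROP TABLE t SYNC;  -- would remove the table data immediately, ignoring this setting
```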

## database_catalog_unused_dir_hide_timeout_sec {#database_catalog_unused_dir_hide_timeout_sec}

@ -1725,7 +1737,7 @@ Default value: `0.5`.

Asynchronous loading of databases and tables.

If `true` all non-system databases with `Ordinary`, `Atomic` and `Replicated` engine will be loaded asynchronously after the ClickHouse server start up. See `system.asynchronous_loader` table, `tables_loader_background_pool_size` and `tables_loader_foreground_pool_size` server settings. Any query that tries to access a table, that is not yet loaded, will wait for exactly this table to be started up. If load job fails, query will rethrow an error (instead of shutting down the whole server in case of `async_load_databases = false`). The table that is waited for by at least one query will be loaded with higher priority. DDL queries on a database will wait for exactly that database to be started up.
If `true` all non-system databases with `Ordinary`, `Atomic` and `Replicated` engine will be loaded asynchronously after the ClickHouse server start up. See `system.asynchronous_loader` table, `tables_loader_background_pool_size` and `tables_loader_foreground_pool_size` server settings. Any query that tries to access a table, that is not yet loaded, will wait for exactly this table to be started up. If load job fails, query will rethrow an error (instead of shutting down the whole server in case of `async_load_databases = false`). The table that is waited for by at least one query will be loaded with higher priority. DDL queries on a database will wait for exactly that database to be started up. Also consider setting a limit `max_waiting_queries` for the total number of waiting queries.

If `false`, all databases are loaded when the server starts.
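
The `system.asynchronous_loader` table mentioned above can be used to observe load jobs while they are pending; a minimal sketch (output columns vary by version):

```sql
-- Peek at asynchronous load jobs shortly after server startup
SELECT *
FROM system.asynchronous_loader
LIMIT 10;
```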

@ -2926,7 +2938,7 @@ Default: 0

## ignore_empty_sql_security_in_create_view_query {#ignore_empty_sql_security_in_create_view_query}

If true, ClickHouse doesn't write defaults for empty SQL security statement in CREATE VIEW queries.

:::note
This setting is only necessary for the migration period and will become obsolete in 24.4.
@ -4,6 +4,67 @@ sidebar_label: Polygons
title: "Functions for Working with Polygons"
---

## WKT

Returns a WKT (Well Known Text) geometric object from various [Geo Data Types](../../data-types/geo.md). Supported WKT objects are:

- POINT
- POLYGON
- MULTIPOLYGON

**Syntax**

```sql
WKT(geo_data)
```

**Parameters**

`geo_data` can be one of the following [Geo Data Types](../../data-types/geo.md) or their underlying primitive types:

- [Point](../../data-types/geo.md#point)
- [Ring](../../data-types/geo.md#ring)
- [Polygon](../../data-types/geo.md#polygon)
- [MultiPolygon](../../data-types/geo.md#multipolygon)

**Returned value**

- WKT geometric object `POINT` is returned for a Point.
- WKT geometric object `POLYGON` is returned for a Polygon.
- WKT geometric object `MULTIPOLYGON` is returned for a MultiPolygon.

**Examples**

POINT from tuple:

```sql
SELECT wkt((0., 0.));
```

```response
POINT(0 0)
```

POLYGON from an array of tuples or an array of tuple arrays:

```sql
SELECT wkt([(0., 0.), (10., 0.), (10., 10.), (0., 10.)]);
```

```response
POLYGON((0 0,10 0,10 10,0 10))
```

MULTIPOLYGON from an array of multi-dimensional tuple arrays:

```sql
SELECT wkt([[[(0., 0.), (10., 0.), (10., 10.), (0., 10.)], [(4., 4.), (5., 4.), (5., 5.), (4., 5.)]], [[(-10., -10.), (-10., -9.), (-9., 10.)]]]);
```

```response
MULTIPOLYGON(((0 0,10 0,10 10,0 10,0 0),(4 4,5 4,5 5,4 5,4 4)),((-10 -10,-10 -9,-9 10,-10 -10)))
```

## readWKTMultiPolygon

Converts a WKT (Well Known Text) MultiPolygon into a MultiPolygon type.
@ -350,6 +350,7 @@ ALTER TABLE mt DELETE IN PARTITION ID '2' WHERE p = 2;
You can specify the partition expression in `ALTER ... PARTITION` queries in different ways (a consolidated sketch follows this list):

- As a value from the `partition` column of the `system.parts` table. For example, `ALTER TABLE visits DETACH PARTITION 201901`.
- Using the keyword `ALL`. It can be used only with DROP/DETACH/ATTACH. For example, `ALTER TABLE visits ATTACH PARTITION ALL`.
- As a tuple of expressions or constants that matches (in types) the table partitioning keys tuple. In the case of a single element partitioning key, the expression should be wrapped in the `tuple (...)` function. For example, `ALTER TABLE visits DETACH PARTITION tuple(toYYYYMM(toDate('2019-01-25')))`.
- Using the partition ID. The partition ID is a string identifier of the partition (human-readable, if possible) that is used as the name of the partition in the file system and in ZooKeeper. The partition ID must be specified in the `PARTITION ID` clause, in single quotes. For example, `ALTER TABLE visits DETACH PARTITION ID '201901'`.
- In the [ALTER ATTACH PART](#alter_attach-partition) and [DROP DETACHED PART](#alter_drop-detached) queries, to specify the name of a part, use a string literal with a value from the `name` column of the [system.detached_parts](/docs/en/operations/system-tables/detached_parts.md/#system_tables-detached_parts) table. For example, `ALTER TABLE visits ATTACH PART '201901_1_1_0'`.
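
Putting the list above together, here is the consolidated sketch referenced earlier (reusing the `visits` table from the bullet examples):

```sql
ALTER TABLE visits DETACH PARTITION 201901;                                 -- partition value from system.parts
ALTER TABLE visits ATTACH PARTITION tuple(toYYYYMM(toDate('2019-01-25')));  -- expression tuple (same partition)
ALTER TABLE visits DROP PARTITION ID '201901';                              -- partition ID in single quotes
```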

@ -13,6 +13,13 @@ a system table called `system.dropped_tables`.

If you have a materialized view without a `TO` clause associated with the dropped table, then you will also have to UNDROP the inner table of that view.

:::note
UNDROP TABLE is experimental. To use it, add this setting:
```sql
set allow_experimental_undrop_table_query = 1;
```
:::

:::tip
Also see [DROP TABLE](/docs/en/sql-reference/statements/drop.md)
:::
@ -25,53 +32,60 @@ UNDROP TABLE [db.]name [UUID '<uuid>'] [ON CLUSTER cluster]

**Example**

``` sql
set allow_experimental_undrop_table_query = 1;
```

```sql
CREATE TABLE tab
CREATE TABLE undropMe
(
    `id` UInt8
)
ENGINE = MergeTree
ORDER BY id;

DROP TABLE tab;

SELECT *
FROM system.dropped_tables
FORMAT Vertical;
ORDER BY id
```

```sql
DROP TABLE undropMe
```
```sql
SELECT *
FROM system.dropped_tables
FORMAT Vertical
```
```response
Row 1:
──────
index:                 0
database:              default
table:                 tab
table:                 undropMe
uuid:                  aa696a1a-1d70-4e60-a841-4c80827706cc
engine:                MergeTree
metadata_dropped_path: /var/lib/clickhouse/metadata_dropped/default.tab.aa696a1a-1d70-4e60-a841-4c80827706cc.sql
metadata_dropped_path: /var/lib/clickhouse/metadata_dropped/default.undropMe.aa696a1a-1d70-4e60-a841-4c80827706cc.sql
table_dropped_time:    2023-04-05 14:12:12

1 row in set. Elapsed: 0.001 sec.
```

```sql
UNDROP TABLE tab;

UNDROP TABLE undropMe
```
```response
Ok.
```
```sql
SELECT *
FROM system.dropped_tables
FORMAT Vertical;

FORMAT Vertical
```
```response
Ok.

0 rows in set. Elapsed: 0.001 sec.
```

```sql
DESCRIBE TABLE tab
FORMAT Vertical;
DESCRIBE TABLE undropMe
FORMAT Vertical
```

```response
Row 1:
──────
8
docs/en/sql-reference/table-functions/generateSeries.md
Normal file
@ -0,0 +1,8 @@
---
slug: /en/sql-reference/table-functions/generateSeries
sidebar_position: 147
sidebar_label: generateSeries
---

### Alias To
[generate_series](generate_series.md)
25
docs/en/sql-reference/table-functions/generate_series.md
Normal file
@ -0,0 +1,25 @@
---
slug: /en/sql-reference/table-functions/generate_series
sidebar_position: 146
sidebar_label: generate_series
---

# generate_series

`generate_series(START, STOP)` - Returns a table with the single ‘generate_series’ column (UInt64) that contains integers from start to stop inclusively.

`generate_series(START, STOP, STEP)` - Returns a table with the single ‘generate_series’ column (UInt64) that contains integers from start to stop inclusively, with spacing between values given by STEP.

The following queries return tables with the same content but different column names:

``` sql
SELECT * FROM numbers(10, 5);
SELECT * FROM generate_series(10, 14);
```

And the following queries return tables with the same content but different column names (the second option is more efficient):

``` sql
SELECT * FROM numbers(10, 11) WHERE number % 3 == (10 % 3);
SELECT * FROM generate_series(10, 20, 3);
```
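
As a concrete illustration of `STEP`, the stepped query above yields the values 10, 13, 16 and 19 — each value three apart and never exceeding `STOP`:

``` sql
SELECT * FROM generate_series(10, 20, 3);
```

``` response
┌─generate_series─┐
│              10 │
│              13 │
│              16 │
│              19 │
└─────────────────┘
```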

@ -8,6 +8,7 @@ sidebar_label: numbers

`numbers(N)` – Returns a table with the single ‘number’ column (UInt64) that contains integers from 0 to N-1.
`numbers(N, M)` - Returns a table with the single ‘number’ column (UInt64) that contains integers from N to (N + M - 1).
`numbers(N, M, S)` - Returns a table with the single ‘number’ column (UInt64) that contains integers from N to (N + M - 1) with step S.

Similar to the `system.numbers` table, it can be used for testing and generating successive values; `numbers(N, M)` is more efficient than `system.numbers`.

@ -21,6 +22,15 @@ SELECT * FROM system.numbers WHERE number BETWEEN 0 AND 9;
SELECT * FROM system.numbers WHERE number IN (0, 1, 2, 3, 4, 5, 6, 7, 8, 9);
```

And the following queries are equivalent:

``` sql
SELECT number * 2 FROM numbers(10);
SELECT (number - 10) * 2 FROM numbers(10, 10);
SELECT * FROM numbers(0, 20, 2);
```

Examples:

``` sql
@ -34,8 +34,6 @@ suggests:
contents:
- src: root/usr/bin/clickhouse
  dst: /usr/bin/clickhouse
- src: root/usr/bin/clickhouse-diagnostics
  dst: /usr/bin/clickhouse-diagnostics
- src: root/usr/bin/clickhouse-extract-from-config
  dst: /usr/bin/clickhouse-extract-from-config
- src: root/usr/bin/clickhouse-library-bridge

@ -268,10 +268,6 @@ if (ENABLE_TESTS)
    add_dependencies(clickhouse-bundle clickhouse-tests)
endif()

if (ENABLE_FUZZING)
    add_compile_definitions(FUZZING_MODE=1)
endif ()

if (TARGET ch_contrib::protobuf)
    get_property(google_proto_files TARGET ch_contrib::protobuf PROPERTY google_proto_files)
    foreach (proto_file IN LISTS google_proto_files)

@ -34,6 +34,7 @@
#include <Common/StudentTTest.h>
#include <Common/CurrentMetrics.h>
#include <Common/ErrorCodes.h>
#include <Core/BaseSettingsProgramOptions.h>


/** A tool for evaluating ClickHouse performance.
@ -623,7 +624,7 @@ int mainEntryClickHouseBenchmark(int argc, char ** argv)
    ;

    Settings settings;
    settings.addProgramOptions(desc);
    addProgramOptions(settings, desc);

    boost::program_options::variables_map options;
    boost::program_options::store(boost::program_options::parse_command_line(argc, argv, desc), options);

@ -933,7 +933,7 @@ void Client::addOptions(OptionsDescription & options_description)
    ("config,c", po::value<std::string>(), "config-file path (another shorthand)")
    ("connection", po::value<std::string>(), "connection to use (from the client config), by default connection name is hostname")
    ("secure,s", "Use TLS connection")
    ("no-secure,s", "Don't use TLS connection")
    ("no-secure", "Don't use TLS connection")
    ("user,u", po::value<std::string>()->default_value("default"), "user")
    ("password", po::value<std::string>(), "password")
    ("ask-password", "ask-password")
30
programs/diagnostics/.gitignore
vendored
@ -1,30 +0,0 @@
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib

# Test binary, built with `go test -c`
*.test

# Output of the go coverage tool, specifically when used with LiteIDE
*.out

# Dependency directories (remove the comment below to include it)
# vendor/

# Go workspace file
go.work

.idea
clickhouse-diagnostics
output
vendor
bin
profile.cov
clickhouse-diagnostics.yml
dist/
@ -1,49 +0,0 @@
# Contribution

We keep things simple. Execute all commands in this folder.

## Requirements

- docker - tested on version 20.10.12.
- golang >= go1.17.6

## Building

Creates a binary `clickhouse-diagnostics` in the local folder. The build will be versioned according to a timestamp. For a versioned release, see [Releasing](#releasing).

```bash
make build
```

## Linting

We use [golangci-lint](https://golangci-lint.run/). We run it in a container, so there is no need to install it.

```bash
make lint-go
```

## Running Tests

```bash
make test
```

For a coverage report,

```bash
make test-coverage
```

## Adding Collectors

TODO

## Adding Outputs

TODO

## Frames

## Parameter Types
@ -1,65 +0,0 @@
GOCMD=go
GOTEST=$(GOCMD) test
BINARY_NAME=clickhouse-diagnostics
BUILD_DIR=dist

TIMESTAMP := $(shell date +%Y%m%d-%H%M)
COMMIT := $(shell git rev-parse --short HEAD)
MODULE := github.com/ClickHouse/ClickHouse/programs/diagnostics
VERSION := v.dev-${TIMESTAMP}
DEVLDFLAGS = -ldflags "-X ${MODULE}/cmd.Version=${VERSION} -X ${MODULE}/cmd.Commit=${COMMIT}"

# override with env variable to test other versions e.g. 21.11.10.1
CLICKHOUSE_VERSION ?= latest

GREEN  := $(shell tput -Txterm setaf 2)
YELLOW := $(shell tput -Txterm setaf 3)
WHITE  := $(shell tput -Txterm setaf 7)
CYAN   := $(shell tput -Txterm setaf 6)
RESET  := $(shell tput -Txterm sgr0)

.PHONY: all test build vendor release lint-go test-coverages dep

all: help

release: ## Release is delegated to goreleaser
	$(shell goreleaser release --rm-dist)

## Build:
build: ## Build a binary for local use
	# timestamped version
	$(GOCMD) build ${DEVLDFLAGS} -o $(BINARY_NAME) ./cmd/clickhouse-diagnostics

clean: ## Remove build related file
	rm ${BINARY_NAME}
	rm -f checkstyle-report.xml ./coverage.xml ./profile.cov

vendor: ## Copy of all packages needed to support builds and tests in the vendor directory
	$(GOCMD) mod vendor

test: ## Run the tests of the project
	CLICKHOUSE_VERSION=$(CLICKHOUSE_VERSION) $(GOTEST) -v -race `go list ./... | grep -v ./internal/platform/test`

test-no-docker: ## Don't run tests depending on dockerd
	CLICKHOUSE_VERSION=$(CLICKHOUSE_VERSION) $(GOTEST) -v -race -tags no_docker `go list ./... | grep -v ./internal/platform/test`

lint-go: ## Use golintci-lint
	docker run --rm -v $(shell pwd):/app -w /app golangci/golangci-lint:latest-alpine golangci-lint run

test-coverage: ## Run the tests of the project and export the coverage
	CLICKHOUSE_VERSION=$(CLICKHOUSE_VERSION) $(GOTEST) -cover -covermode=count -coverprofile=profile.cov `go list ./... | grep -v ./internal/platform/test`
	$(GOCMD) tool cover -func profile.cov

dep:
	$(shell go mod download)

help: ## Show this help.
	@echo ''
	@echo 'Usage:'
	@echo '  ${YELLOW}make${RESET} ${GREEN}<target>${RESET}'
	@echo ''
	@echo 'Targets:'
	@awk 'BEGIN {FS = ":.*?## "} { \
		if (/^[a-zA-Z_-]+:.*?##.*$$/) {printf "    ${YELLOW}%-20s${GREEN}%s${RESET}\n", $$1, $$2} \
		else if (/^## .*$$/) {printf "  ${CYAN}%s${RESET}\n", substr($$1,4)} \
		}' $(MAKEFILE_LIST)
@ -1,167 +0,0 @@
# ClickHouse Diagnostics Tool

## Purpose

This tool provides a means of obtaining a diagnostic bundle from a ClickHouse instance. This bundle can be provided to your nearest ClickHouse support provider in order to assist with the diagnosis of issues.

## Design Philosophy

- **No local dependencies** to run. We compile to a platform-independent binary, hence Go.
- **Minimize resource overhead**. Improvements always welcome.
- **Extendable framework**. At its core, the tool provides collectors and outputs. Collectors are independent and are responsible for collecting a specific dataset e.g. system configuration. Outputs produce the diagnostic bundle in a specific format. It should be trivial for contributors to add both. See [Collectors](#collectors) and [Outputs](#outputs) for more details.
- **Convertible output formats**. Outputs produce diagnostic bundles in different formats e.g. archive, simple report etc. Where possible, it should be possible to convert between these formats. For example, an administrator may provide a bundle as an archive to their support provider who in turn wishes to visualise this as a report or even in ClickHouse itself...
- **Something is better than nothing**. Collectors execute independently. We never fail a collection because one fails - preferring to warn the user only. There are good reasons for a collector failure e.g. insufficient permissions or missing data.
- **Execute anywhere** - Ideally, this tool is executed on a ClickHouse host. Some collectors e.g. configuration file collection or system information, rely on this. However, collectors will obtain as much information remotely from the database as possible if executed remotely from the cluster - warning where collection fails. **We do currently require ClickHouse to be running, connecting over the native port**.

We recommend reading [Permissions, Warnings & Locality](#permissions-warnings--locality).

## Usage

### Collection

The `collect` command allows the collection of a diagnostic bundle. In its simplest form, assuming ClickHouse is running locally on default ports with no password:

```bash
clickhouse-diagnostics collect
```

This will use the default collectors and the simple output. This output produces a timestamped archive bundle in `gz` format in a sub-folder named after the host. This folder name can be controlled via the parameter `--id` or configured directly for the simple output parameter `output.simple.folder` (this allows a specific directory to be specified).

Collectors, outputs and ClickHouse connection credentials can be specified as shown below:

```bash
clickhouse-diagnostics collect --password random --username default --collector=system_db,system --output=simple --id my_cluster_name
```

This collects the system database and host information from the cluster running locally. The archive bundle will be produced under a folder `my_cluster_name`.

For further details, use the built-in help (the commands below are equivalent):

```bash
clickhouse-diagnostics collect --help
./clickhouse-diagnostics help collect
```

### Help & Finding parameters for collectors & outputs

Collectors and outputs have their own parameters not listed under the help for the `collect` command. These can be identified using the `help` command. Specifically,

For more information about a specific collector:

```bash
Use "clickhouse-diagnostics help --collector [collector]"
```

For more information about a specific output:

```bash
Use "clickhouse-diagnostics help --output [output]"
```

### Convert

Coming soon to a cluster near you...

## Collectors

We currently support the following collectors. A `*` indicates this collector is enabled by default:

- `system_db*` - Collects all tables in the system database, except those which have been excluded, up to a specified row limit.
- `system*` - Collects summary OS and hardware statistics for the host.
- `config*` - Collects the ClickHouse configuration from the local filesystem. A best effort is made using process information if ClickHouse is not installed locally. `include_path` directives are also considered.
- `db_logs*` - Collects the ClickHouse logs directly from the database.
- `logs*` - Collects the ClickHouse logs directly from the host filesystem.
- `summary*` - Collects summary statistics on the database based on a set of known useful queries. This represents the easiest collector to extend - contributions are welcome to this set, which can be found [here](https://github.com/ClickHouse/ClickHouse/blob/master/programs/diagnostics/internal/collectors/clickhouse/queries.json).
- `file` - Collects files based on glob patterns. Does not collect directories. To preview the files which will be collected, try `clickhouse-diagnostics collect --collectors=file --collector.file.file_pattern=<glob path> --output report`.
- `command` - Collects the output of a user-specified command. To preview the output, try `clickhouse-diagnostics collect --collectors=command --collector.command.command="<command>" --output report`.
- `zookeeper_db` - Collects information about ZooKeeper using the `system.zookeeper` table, recursively iterating the ZooKeeper tree/table. Note: changing the default parameter values can cause extremely high load to be placed on the database. Use with caution. By default, uses the glob `/clickhouse/{task_queue}/**` to match ZooKeeper paths and iterates to a max depth of 8.

## Outputs

We currently support the following outputs. The `simple` output is currently the default:

- `simple` - Writes out the diagnostic bundle as files in a structured directory, optionally producing a compressed archive.
- `report` - Writes out the diagnostic bundle to the terminal as a simple report. Supports an ASCII table format or markdown.
- `clickhouse` - **Under development**. This will allow a bundle to be stored in a cluster, allowing visualization in common tooling e.g. Grafana.

## Simple Output

Since the `simple` output is the default, we provide additional details here.
This output produces a timestamped archive, by default in `gz` format, under a directory named with either the hostname or the specified collection `--id`. As shown below, a specific folder can also be specified. Compression can also be disabled, leaving just the contents of the folder:

```bash
./clickhouse-diagnostics help --output simple

Writes out the diagnostic bundle as files in a structured directory, optionally producing a compressed archive.

Usage:
  --output=simple [flags]

Flags:
      --output.simple.directory string   Directory in which to create dump. Defaults to the current directory. (default "./")
      --output.simple.format string      Format of exported files (default "csv")
      --output.simple.skip_archive       Don't compress output to an archive
```

The archive itself contains a folder for each collector. Each collector can potentially produce many discrete sets of data, known as frames. Each of these typically results in a single file within the collector's folder. For example, each query for the `summary` collector results in a correspondingly named file within the `summary` folder.

## Permissions, Warnings & Locality

Some collectors either require specific permissions for complete collection or should be executed on a ClickHouse host. We aim to collate these requirements below:

- `system_db` - This collector aims to collect all tables in the `system` database. Some tables may fail if certain features are not enabled. Specifically, [allow_introspection_functions](https://clickhouse.com/docs/en/operations/settings/settings/#settings-allow_introspection_functions) is required to collect the `stack_traces` table. [access_management](https://clickhouse.com/docs/en/operations/settings/settings-users/#access_management-user-setting) must be set for the ClickHouse user specified for collection, to permit access to access management tables e.g. `quota_usage`.
- `db_logs` - The ClickHouse user must have access to the tables `query_log`, `query_thread_log` and `text_log`.
- `logs` - The system user under which the tool is executed must have access to the logs directory. It must therefore also be executed on the target ClickHouse server directly for this collector to work. In cases where the logs directory is not in a default location e.g. `/var/log/clickhouse-server`, we will attempt to establish the location from the ClickHouse configuration. This requires permission to read the configuration files - which in most cases requires specific permissions to be granted to the run user if you are not comfortable executing the tool under sudo or the `clickhouse` user.
- `summary` - This collector executes pre-recorded queries. Some of these read tables concerning access management, thus requiring the ClickHouse user to have the [access_management](https://clickhouse.com/docs/en/operations/settings/settings-users/#access_management-user-setting) permission.
- `config` - This collector reads and copies the local configuration files. It thus requires permission to read the configuration files - which in most cases requires specific permissions to be granted to the run user if you are not comfortable executing the tool under sudo or the `clickhouse` user.

**If a collector cannot collect specific data because of either execution location or permissions, it will log a warning to the terminal.**

## Logging

All logs are output to `stderr`. `stdout` is used exclusively for outputs to print information.

## Configuration file

In addition to supporting parameters via the command line, a configuration file can be specified via the `--config`, `-f` flag.

By default, we look for a configuration file `clickhouse-diagnostics.yml` in the same directory as the binary. If not present, we revert to command line flags.

**Values set via the command line always take precedence over those in the configuration file.**

All parameters can be set via the configuration file and can in most cases be converted to a yaml hierarchy, where periods indicate a nesting. For example,

`--collector.system_db.row_limit=1`

becomes

```yaml
collector:
  system_db:
    row_limit: 1
```

The following exceptions exist to avoid collisions:

| Command | Parameter  | Configuration File |
|---------|------------|--------------------|
| collect | output     | collect.output     |
| collect | collectors | collect.collectors |

## FAQ

1. Does the collector need root permissions?

    No. However, to read some local files e.g. configurations, the tool should be executed as the `clickhouse` user.

2. What ClickHouse database permissions does the collector need?

    Read permissions on all system tables are required in most cases - although only specific collectors need this. [Access management permissions](https://clickhouse.com/docs/en/operations/settings/settings-users/#access_management-user-setting) will ensure full collection.

3. Is any processing done on logs for anonymization purposes?

    Currently no. ClickHouse should not log sensitive information to logs e.g. passwords.

4. Is sensitive information removed from configuration files e.g. passwords?

    Yes. We remove both passwords and hashed passwords. Please raise an issue if you require further information to be anonymized. We appreciate this is a sensitive topic.
@ -1,9 +0,0 @@
package main

import (
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd"
)

func main() {
	cmd.Execute()
}
@ -1,159 +0,0 @@
package cmd

import (
	"fmt"
	"os"
	"strings"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd/params"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
	_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
	_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
	_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/file"
	_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/terminal"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"github.com/spf13/viper"
)

var id string
var output = params.StringOptionsVar{
	Options: outputs.GetOutputNames(),
	Value:   "simple",
}

// access credentials
var host string
var port uint16
var username string
var password string

var collectorNames = params.StringSliceOptionsVar{
	Options: collectors.GetCollectorNames(false),
	Values:  collectors.GetCollectorNames(true),
}

// holds the collector params passed by the cli
var collectorParams params.ParamMap

// holds the output params passed by the cli
var outputParams params.ParamMap

const collectHelpTemplate = `Usage:{{if .Runnable}}
  {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
  {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}

Aliases:
  {{.NameAndAliases}}{{end}}{{if .HasExample}}

Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}

Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
  {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}

Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}

Global Flags:
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}

Additional help topics:
	Use "{{.CommandPath}} [command] --help" for more information about a command.
	Use "{{.Parent.Name}} help --collector [collector]" for more information about a specific collector.
	Use "{{.Parent.Name}} help --output [output]" for more information about a specific output.
`

func init() {
	collectCmd.Flags().StringVar(&id, "id", getHostName(), "Id of diagnostic bundle")

	// access credentials
	collectCmd.Flags().StringVar(&host, "host", "localhost", "ClickHouse host")
	collectCmd.Flags().Uint16VarP(&port, "port", "p", 9000, "ClickHouse native port")
	collectCmd.Flags().StringVarP(&username, "username", "u", "", "ClickHouse username")
	collectCmd.Flags().StringVar(&password, "password", "", "ClickHouse password")
	// collectors and outputs
	collectCmd.Flags().VarP(&output, "output", "o", fmt.Sprintf("Output Format for the diagnostic Bundle, options: [%s]\n", strings.Join(output.Options, ",")))
	collectCmd.Flags().VarP(&collectorNames, "collectors", "c", fmt.Sprintf("Collectors to use, options: [%s]\n", strings.Join(collectorNames.Options, ",")))

	collectorConfigs, err := collectors.BuildConfigurationOptions()
	if err != nil {
		log.Fatal().Err(err).Msg("Unable to build collector configurations")
	}
	collectorParams = params.NewParamMap(collectorConfigs)

	outputConfigs, err := outputs.BuildConfigurationOptions()
	if err != nil {
		log.Fatal().Err(err).Msg("Unable to build output configurations")
	}
	params.AddParamMapToCmd(collectorParams, collectCmd, "collector", true)

	outputParams = params.NewParamMap(outputConfigs)
	params.AddParamMapToCmd(outputParams, collectCmd, "output", true)

	collectCmd.SetFlagErrorFunc(handleFlagErrors)
	collectCmd.SetHelpTemplate(collectHelpTemplate)
	rootCmd.AddCommand(collectCmd)
}

var collectCmd = &cobra.Command{
	Use:   "collect",
	Short: "Collect a diagnostic bundle",
	Long:  `Collect a ClickHouse diagnostic bundle for a specified ClickHouse instance`,
	PreRun: func(cmd *cobra.Command, args []string) {
		bindFlagsToConfig(cmd)
	},
	Example: fmt.Sprintf(`%s collect --username default --collector=%s --output=simple`, rootCmd.Name(), strings.Join(collectorNames.Options[:2], ",")),
	Run: func(cmd *cobra.Command, args []string) {
		log.Info().Msgf("executing collect command with %v collectors and %s output", collectorNames.Values, output.Value)
		outputConfig := params.ConvertParamsToConfig(outputParams)[output.Value]
		runConfig := internal.NewRunConfiguration(id, host, port, username, password, output.Value, outputConfig, collectorNames.Values, params.ConvertParamsToConfig(collectorParams))
		internal.Capture(runConfig)
		os.Exit(0)
	},
}

func getHostName() string {
	name, err := os.Hostname()
	if err != nil {
		name = "clickhouse-diagnostics"
	}
	return name
}

// these flags are nested under the cmd name in the config file to prevent collisions
var flagsToNest = []string{"output", "collectors"}

// this saves us binding each command manually to viper
func bindFlagsToConfig(cmd *cobra.Command) {
	cmd.Flags().VisitAll(func(f *pflag.Flag) {
		err := viper.BindEnv(f.Name, fmt.Sprintf("%s_%s", envPrefix,
			strings.ToUpper(strings.Replace(f.Name, ".", "_", -1))))
		if err != nil {
			log.Error().Msgf("Unable to bind %s to config", f.Name)
		}
		configFlagName := f.Name
		if utils.Contains(flagsToNest, f.Name) {
			configFlagName = fmt.Sprintf("%s.%s", cmd.Use, configFlagName)
		}
		err = viper.BindPFlag(configFlagName, f)
		if err != nil {
			log.Error().Msgf("Unable to bind %s to config", f.Name)
		}
		// here we prefer the config value when the param is not set on the cmd line
		if !f.Changed && viper.IsSet(configFlagName) {
			val := viper.Get(configFlagName)
			log.Debug().Msgf("Setting parameter %s from configuration file", f.Name)
			err = cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val))
			if err != nil {
				log.Error().Msgf("Unable to read \"%s\" value from config", f.Name)
			} else {
				log.Debug().Msgf("Set parameter \"%s\" from configuration", f.Name)
			}
		}
	})
}
@ -1 +0,0 @@
package cmd
@ -1,124 +0,0 @@
package cmd

import (
	"fmt"
	"os"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd/params"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
)

var cHelp = params.StringOptionsVar{
	Options: collectors.GetCollectorNames(false),
	Value:   "",
}
var oHelp = params.StringOptionsVar{
	Options: outputs.GetOutputNames(),
	Value:   "",
}

func init() {
	helpCmd.Flags().VarP(&cHelp, "collector", "c", "Specify collector to get description of available flags")
	helpCmd.Flags().VarP(&oHelp, "output", "o", "Specify output to get description of available flags")
	helpCmd.SetUsageTemplate(`Usage:{{if .Runnable}}
  {{.UseLine}}{{end}}{{if .HasExample}}

Examples:
{{.Example}}{{end}}

Available Commands:{{range .Parent.Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
  {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}

Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}

Alternatively use "{{.CommandPath}} [command] --help" for more information about a command.
`)
	helpCmd.SetFlagErrorFunc(handleFlagErrors)

}

var helpCmd = &cobra.Command{
	Use:   "help [command]",
	Short: "Help about any command, collector or output",
	Long:  `Help provides help for any command, collector or output in the application.`,
	Example: fmt.Sprintf(`%[1]v help collect
%[1]v help --collector=config
%[1]v help --output=simple`, rootCmd.Name()),
	Run: func(c *cobra.Command, args []string) {
		if len(args) != 0 {
			// find the command on which help is requested
			cmd, _, e := c.Root().Find(args)
			if cmd == nil || e != nil {
				c.Printf("Unknown help topic %#q\n", args)
				cobra.CheckErr(c.Root().Usage())
			} else {
				cmd.InitDefaultHelpFlag()
				cobra.CheckErr(cmd.Help())
			}
			return
		}
		if cHelp.Value != "" && oHelp.Value != "" {
			log.Error().Msg("Specify either --collector or --output, not both")
			_ = c.Help()
			os.Exit(1)
		}
		if cHelp.Value != "" {
			collector, err := collectors.GetCollectorByName(cHelp.Value)
			if err != nil {
				log.Fatal().Err(err).Msgf("Unable to initialize collector %s", cHelp.Value)
			}
			configHelp(collector.Configuration(), "collector", cHelp.Value, collector.Description())
		} else if oHelp.Value != "" {
			output, err := outputs.GetOutputByName(oHelp.Value)
			if err != nil {
				log.Fatal().Err(err).Msgf("Unable to initialize output %s", oHelp.Value)
			}
			configHelp(output.Configuration(), "output", oHelp.Value, output.Description())
		} else {
			_ = c.Help()
		}
		os.Exit(0)
	},
}

func configHelp(conf config.Configuration, componentType, name, description string) {

	paramMap := params.NewParamMap(map[string]config.Configuration{
		name: conf,
	})
	tempHelpCmd := &cobra.Command{
		Use:           fmt.Sprintf("--%s=%s", componentType, name),
		Short:         fmt.Sprintf("Help about the %s collector", name),
		Long:          description,
		SilenceErrors: true,
		Run: func(c *cobra.Command, args []string) {
			_ = c.Help()
		},
	}
	params.AddParamMapToCmd(paramMap, tempHelpCmd, componentType, false)
	// this is a workaround to hide the help flag
	tempHelpCmd.Flags().BoolP("help", "h", false, "Dummy help")
	tempHelpCmd.Flags().Lookup("help").Hidden = true
	tempHelpCmd.SetUsageTemplate(`
{{.Long}}

Usage:{{if .Runnable}}
  {{.UseLine}}{{end}}{{if .HasExample}}

Examples:
{{.Example}}{{end}}

Flags:{{if .HasAvailableLocalFlags}}
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{else}}

No configuration flags available
{{end}}
`)

	_ = tempHelpCmd.Execute()
}
@ -1,281 +0,0 @@
package params

import (
	"bytes"
	"encoding/csv"
	"fmt"
	"strings"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
	"github.com/spf13/cobra"
)

type cliParamType uint8

const (
	String cliParamType = iota
	StringList
	StringOptionsList
	Integer
	Boolean
)

type CliParam struct {
	Description string
	Default     interface{}
	// this should always be an address to a value - as required by cobra
	Value interface{}
	Type  cliParamType
}

type ParamMap map[string]map[string]CliParam

func NewParamMap(configs map[string]config.Configuration) ParamMap {
	paramMap := make(ParamMap)
	for name, configuration := range configs {
		for _, param := range configuration.Params {
			switch p := param.(type) {
			case config.StringParam:
				paramMap = paramMap.createStringParam(name, p)
			case config.StringListParam:
				paramMap = paramMap.createStringListParam(name, p)
			case config.StringOptions:
				paramMap = paramMap.createStringOptionsParam(name, p)
			case config.IntParam:
				paramMap = paramMap.createIntegerParam(name, p)
			case config.BoolParam:
				paramMap = paramMap.createBoolParam(name, p)
			}
		}
	}
	return paramMap
}

func (m ParamMap) createBoolParam(rootKey string, bParam config.BoolParam) ParamMap {
	if _, ok := m[rootKey]; !ok {
		m[rootKey] = make(map[string]CliParam)
	}
	var value bool
	param := CliParam{
		Description: bParam.Description(),
		Default:     bParam.Value,
		Value:       &value,
		Type:        Boolean,
	}
	m[rootKey][bParam.Name()] = param
	return m
}

func (m ParamMap) createStringParam(rootKey string, sParam config.StringParam) ParamMap {
	if _, ok := m[rootKey]; !ok {
		m[rootKey] = make(map[string]CliParam)
	}
	var value string
	param := CliParam{
		Description: sParam.Description(),
		Default:     sParam.Value,
		Value:       &value,
		Type:        String,
	}
	m[rootKey][sParam.Name()] = param
	return m
}

func (m ParamMap) createStringListParam(rootKey string, lParam config.StringListParam) ParamMap {
	if _, ok := m[rootKey]; !ok {
		m[rootKey] = make(map[string]CliParam)
	}
	var value []string
	param := CliParam{
		Description: lParam.Description(),
		Default:     lParam.Values,
		Value:       &value,
		Type:        StringList,
	}
	m[rootKey][lParam.Name()] = param
	return m
}

func (m ParamMap) createStringOptionsParam(rootKey string, oParam config.StringOptions) ParamMap {
	if _, ok := m[rootKey]; !ok {
		m[rootKey] = make(map[string]CliParam)
	}
	value := StringOptionsVar{
		Options: oParam.Options,
		Value:   oParam.Value,
	}
	param := CliParam{
		Description: oParam.Description(),
		Default:     oParam.Value,
		Value:       &value,
		Type:        StringOptionsList,
	}
	m[rootKey][oParam.Name()] = param
	return m
}

func (m ParamMap) createIntegerParam(rootKey string, iParam config.IntParam) ParamMap {
	if _, ok := m[rootKey]; !ok {
		m[rootKey] = make(map[string]CliParam)
	}
	var value int64
	param := CliParam{
		Description: iParam.Description(),
		Default:     iParam.Value,
		Value:       &value,
		Type:        Integer,
	}
	m[rootKey][iParam.Name()] = param
	return m
}

func (c CliParam) GetConfigParam(name string) config.ConfigParam {
	// this is a config being passed to a collector - required can be false
	param := config.NewParam(name, c.Description, false)
	switch c.Type {
	case String:
		return config.StringParam{
			Param: param,
			// values will be pointers
			Value: *(c.Value.(*string)),
		}
	case StringList:
		return config.StringListParam{
			Param:  param,
			Values: *(c.Value.(*[]string)),
		}
	case StringOptionsList:
		optionsVar := *(c.Value.(*StringOptionsVar))
		return config.StringOptions{
			Param:   param,
			Options: optionsVar.Options,
			Value:   optionsVar.Value,
		}
	case Integer:
		return config.IntParam{
			Param: param,
			Value: *(c.Value.(*int64)),
		}
	case Boolean:
		return config.BoolParam{
			Param: param,
			Value: *(c.Value.(*bool)),
		}
	}
	return param
}

type StringOptionsVar struct {
	Options []string
	Value   string
}

func (o StringOptionsVar) String() string {
	return o.Value
}

func (o *StringOptionsVar) Set(p string) error {
	isIncluded := func(opts []string, val string) bool {
		for _, opt := range opts {
			if val == opt {
				return true
			}
		}
		return false
	}
	if !isIncluded(o.Options, p) {
		return fmt.Errorf("%s is not included in options: %v", p, o.Options)
	}
	o.Value = p
	return nil
}

func (o *StringOptionsVar) Type() string {
	return "string"
}

type StringSliceOptionsVar struct {
	Options []string
	Values  []string
}

func (o StringSliceOptionsVar) String() string {
	str, _ := writeAsCSV(o.Values)
	return "[" + str + "]"
}

func (o *StringSliceOptionsVar) Set(val string) error {
	values, err := readAsCSV(val)
	if err != nil {
		return err
	}
	vValues := utils.Distinct(values, o.Options)
	if len(vValues) > 0 {
		return fmt.Errorf("%v are not included in options: %v", vValues, o.Options)
	}
	o.Values = values
	return nil
}

func (o *StringSliceOptionsVar) Type() string {
	return "stringSlice"
}

func writeAsCSV(vals []string) (string, error) {
	b := &bytes.Buffer{}
	w := csv.NewWriter(b)
	err := w.Write(vals)
	if err != nil {
		return "", err
	}
	w.Flush()
	return strings.TrimSuffix(b.String(), "\n"), nil
}

func readAsCSV(val string) ([]string, error) {
	if val == "" {
		return []string{}, nil
	}
	stringReader := strings.NewReader(val)
	csvReader := csv.NewReader(stringReader)
	return csvReader.Read()
}

func AddParamMapToCmd(paramMap ParamMap, cmd *cobra.Command, prefix string, hide bool) {
	for rootKey, childMap := range paramMap {
		for childKey, value := range childMap {
			paramName := fmt.Sprintf("%s.%s.%s", prefix, rootKey, childKey)
			switch value.Type {
			case String:
				cmd.Flags().StringVar(value.Value.(*string), paramName, value.Default.(string), value.Description)
			case StringList:
				cmd.Flags().StringSliceVar(value.Value.(*[]string), paramName, value.Default.([]string), value.Description)
			case StringOptionsList:
				cmd.Flags().Var(value.Value.(*StringOptionsVar), paramName, value.Description)
			case Integer:
				cmd.Flags().Int64Var(value.Value.(*int64), paramName, value.Default.(int64), value.Description)
			case Boolean:
				cmd.Flags().BoolVar(value.Value.(*bool), paramName, value.Default.(bool), value.Description)
			}
			// this ensures flags from collectors and outputs are not shown as they will pollute the output
			if hide {
				_ = cmd.Flags().MarkHidden(paramName)
			}
		}
	}
}

func ConvertParamsToConfig(paramMap ParamMap) map[string]config.Configuration {
	configuration := make(map[string]config.Configuration)
	for rootKey, childMap := range paramMap {
		if _, ok := configuration[rootKey]; !ok {
			configuration[rootKey] = config.Configuration{}
		}
		for childKey, value := range childMap {
			configParam := value.GetConfigParam(childKey)
			configuration[rootKey] = config.Configuration{Params: append(configuration[rootKey].Params, configParam)}
		}
	}
	return configuration
}
@ -1,247 +0,0 @@
package params_test

import (
	"os"
	"sort"
	"testing"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd/params"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/spf13/cobra"
	"github.com/stretchr/testify/require"
)

var conf = map[string]config.Configuration{
	"config": {
		Params: []config.ConfigParam{
			config.StringParam{
				Value:      "",
				Param:      config.NewParam("directory", "A directory", false),
				AllowEmpty: true,
			},
		},
	},
	"system": {
		Params: []config.ConfigParam{
			config.StringListParam{
				// nil means include everything
				Values: nil,
				Param:  config.NewParam("include_tables", "Include tables", false),
			},
			config.StringListParam{
				Values: []string{"distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper"},
				Param:  config.NewParam("exclude_tables", "Excluded tables", false),
			},
			config.IntParam{
				Value: 100000,
				Param: config.NewParam("row_limit", "Max rows", false),
			},
		},
	},
	"reader": {
		Params: []config.ConfigParam{
			config.StringOptions{
				Value:   "csv",
				Options: []string{"csv"},
				Param:   config.NewParam("format", "Format of imported files", false),
			},
			config.BoolParam{
				Value: true,
				Param: config.NewParam("collect_archives", "Collect archives", false),
			},
		},
	},
}

func TestNewParamMap(t *testing.T) {
	// test each of the types via NewParamMap - one with each type. the keys here can represent anything e.g. a collector name
	t.Run("test param map correctly converts types", func(t *testing.T) {
		paramMap := params.NewParamMap(conf)
		require.Len(t, paramMap, 3)
		// check config
		require.Contains(t, paramMap, "config")
		require.Len(t, paramMap["config"], 1)
		require.Contains(t, paramMap["config"], "directory")
		require.IsType(t, params.CliParam{}, paramMap["config"]["directory"])
		require.Equal(t, "A directory", paramMap["config"]["directory"].Description)
		require.Equal(t, "", *(paramMap["config"]["directory"].Value.(*string)))
		require.Equal(t, "", paramMap["config"]["directory"].Default)
		require.Equal(t, params.String, paramMap["config"]["directory"].Type)
		// check system
		require.Contains(t, paramMap, "system")
		require.Len(t, paramMap["system"], 3)
		require.IsType(t, params.CliParam{}, paramMap["system"]["include_tables"])

		require.Equal(t, "Include tables", paramMap["system"]["include_tables"].Description)
		var value []string
		require.Equal(t, &value, paramMap["system"]["include_tables"].Value)
		require.Equal(t, value, paramMap["system"]["include_tables"].Default)
		require.Equal(t, params.StringList, paramMap["system"]["include_tables"].Type)

		require.Equal(t, "Excluded tables", paramMap["system"]["exclude_tables"].Description)
		require.IsType(t, params.CliParam{}, paramMap["system"]["exclude_tables"])
		require.Equal(t, &value, paramMap["system"]["exclude_tables"].Value)
		require.Equal(t, []string{"distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper"}, paramMap["system"]["exclude_tables"].Default)
		require.Equal(t, params.StringList, paramMap["system"]["exclude_tables"].Type)

		require.Equal(t, "Max rows", paramMap["system"]["row_limit"].Description)
		require.IsType(t, params.CliParam{}, paramMap["system"]["row_limit"])
		var iValue int64
		require.Equal(t, &iValue, paramMap["system"]["row_limit"].Value)
		require.Equal(t, int64(100000), paramMap["system"]["row_limit"].Default)
		require.Equal(t, params.Integer, paramMap["system"]["row_limit"].Type)

		// check reader
		require.Contains(t, paramMap, "reader")
		require.Len(t, paramMap["reader"], 2)
		require.IsType(t, params.CliParam{}, paramMap["reader"]["format"])
		require.Equal(t, "Format of imported files", paramMap["reader"]["format"].Description)
		oValue := params.StringOptionsVar{
			Options: []string{"csv"},
			Value:   "csv",
		}
		require.Equal(t, &oValue, paramMap["reader"]["format"].Value)
		require.Equal(t, "csv", paramMap["reader"]["format"].Default)
		require.Equal(t, params.StringOptionsList, paramMap["reader"]["format"].Type)

		require.IsType(t, params.CliParam{}, paramMap["reader"]["collect_archives"])
		require.Equal(t, "Collect archives", paramMap["reader"]["collect_archives"].Description)
		var bVar bool
		require.Equal(t, &bVar, paramMap["reader"]["collect_archives"].Value)
		require.Equal(t, true, paramMap["reader"]["collect_archives"].Default)
		require.Equal(t, params.Boolean, paramMap["reader"]["collect_archives"].Type)
	})
}

// test GetConfigParam
func TestConvertParamsToConfig(t *testing.T) {
	paramMap := params.NewParamMap(conf)
	t.Run("test we can convert a param map back to a config", func(t *testing.T) {
		cParam := params.ConvertParamsToConfig(paramMap)
		// these will not be equal as we have some information loss e.g. allowEmpty
		//require.Equal(t, conf, cParam)
		// deep equality
		for name := range conf {
			require.Equal(t, len(conf[name].Params), len(cParam[name].Params))
			// sort both consistently
			sort.Slice(conf[name].Params, func(i, j int) bool {
				return conf[name].Params[i].Name() < conf[name].Params[j].Name()
			})
			sort.Slice(cParam[name].Params, func(i, j int) bool {
				return cParam[name].Params[i].Name() < cParam[name].Params[j].Name()
			})
			for i, param := range conf[name].Params {
				require.Equal(t, param.Required(), cParam[name].Params[i].Required())
				require.Equal(t, param.Name(), cParam[name].Params[i].Name())
				require.Equal(t, param.Description(), cParam[name].Params[i].Description())
			}
		}
	})
}

// create via NewParamMap and add to command AddParamMapToCmd - check contents
func TestAddParamMapToCmd(t *testing.T) {
	paramMap := params.NewParamMap(conf)
	t.Run("test we can add hidden params to a command", func(t *testing.T) {
		testCommand := &cobra.Command{
			Use:   "test",
			Short: "Run a test",
			Long:  `Longer description`,
			Run: func(cmd *cobra.Command, args []string) {
				os.Exit(0)
			},
		}
		params.AddParamMapToCmd(paramMap, testCommand, "collector", true)
		// check we get an error on one which doesn't exist
		_, err := testCommand.Flags().GetString("collector.config.random")
		require.NotNil(t, err)
		// check getting incorrect type
		_, err = testCommand.Flags().GetString("collector.system.include_tables")
		require.NotNil(t, err)

		// check existence of all flags
		directory, err := testCommand.Flags().GetString("collector.config.directory")
		require.Nil(t, err)
		require.Equal(t, "", directory)

		includeTables, err := testCommand.Flags().GetStringSlice("collector.system.include_tables")
		require.Nil(t, err)
		require.Equal(t, []string{}, includeTables)

		excludeTables, err := testCommand.Flags().GetStringSlice("collector.system.exclude_tables")
		require.Nil(t, err)
		require.Equal(t, []string{"distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper"}, excludeTables)

		rowLimit, err := testCommand.Flags().GetInt64("collector.system.row_limit")
		require.Nil(t, err)
		require.Equal(t, int64(100000), rowLimit)

		format, err := testCommand.Flags().GetString("collector.reader.format")
		require.Nil(t, err)
		require.Equal(t, "csv", format)

		collectArchives, err := testCommand.Flags().GetBool("collector.reader.collect_archives")
		require.Nil(t, err)
		require.Equal(t, true, collectArchives)
	})
}

// test StringOptionsVar
func TestStringOptionsVar(t *testing.T) {
	t.Run("test we can set", func(t *testing.T) {
		format := params.StringOptionsVar{
			Options: []string{"csv", "tsv", "native"},
			Value:   "csv",
		}
		require.Equal(t, "csv", format.String())
		err := format.Set("tsv")
		require.Nil(t, err)
		require.Equal(t, "tsv", format.String())
	})

	t.Run("test set invalid", func(t *testing.T) {
		format := params.StringOptionsVar{
			Options: []string{"csv", "tsv", "native"},
			Value:   "csv",
		}
		require.Equal(t, "csv", format.String())
		err := format.Set("random")
		require.NotNil(t, err)
		require.Equal(t, "random is not included in options: [csv tsv native]", err.Error())
	})
}

// test StringSliceOptionsVar
func TestStringSliceOptionsVar(t *testing.T) {
	t.Run("test we can set", func(t *testing.T) {
		formats := params.StringSliceOptionsVar{
			Options: []string{"csv", "tsv", "native", "qsv"},
			Values:  []string{"csv", "tsv"},
		}
		require.Equal(t, "[csv,tsv]", formats.String())
		err := formats.Set("tsv,native")
		require.Nil(t, err)
		require.Equal(t, "[tsv,native]", formats.String())
	})

	t.Run("test set invalid", func(t *testing.T) {
		formats := params.StringSliceOptionsVar{
			Options: []string{"csv", "tsv", "native", "qsv"},
			Values:  []string{"csv", "tsv"},
		}
		require.Equal(t, "[csv,tsv]", formats.String())
		err := formats.Set("tsv,random")
		require.NotNil(t, err)
		require.Equal(t, "[random] are not included in options: [csv tsv native qsv]", err.Error())
		err = formats.Set("msv,random")
		require.NotNil(t, err)
		require.Equal(t, "[msv random] are not included in options: [csv tsv native qsv]", err.Error())
	})
}
@ -1,174 +0,0 @@
package cmd

import (
	"fmt"
	"net/http"
	_ "net/http/pprof"
	"os"
	"strings"
	"time"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
	"github.com/pkg/errors"
	"github.com/rs/zerolog"
	"github.com/rs/zerolog/log"
	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

func enableDebug() {
	if debug {
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
		go func() {
			// http.ListenAndServe blocks until the server fails, so it only
			// ever returns a non-nil error - log before starting the listener
			log.Debug().Msg("starting debugger on localhost:8080")
			if err := http.ListenAndServe("localhost:8080", nil); err != nil {
				log.Error().Err(err).Msg("unable to start debugger")
			}
		}()
	}
}
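
// With debug enabled, the handlers registered by the blank net/http/pprof
// import above are served from localhost:8080, so profiles can be pulled with
// the standard tooling, e.g. (illustrative):
//
//	go tool pprof http://localhost:8080/debug/pprof/heap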

var rootCmd = &cobra.Command{
	Use:   "clickhouse-diagnostics",
	Short: "Capture and convert ClickHouse diagnostic bundles.",
	Long:  `Captures ClickHouse diagnostic bundles to a number of supported formats, including file and ClickHouse itself. Converts bundles between formats.`,
	PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
		enableDebug()
		err := initializeConfig()
		if err != nil {
			// a zerolog event is only emitted once Msg/Send is called
			log.Error().Err(err).Msg("failed to initialize configuration")
			os.Exit(1)
		}
		return nil
	},
	Example: `clickhouse-diagnostics collect`,
}

const (
	colorRed = iota + 31
	colorGreen
	colorYellow
	colorMagenta = 35

	colorBold = 1
)

const TimeFormat = time.RFC3339

var debug bool
var configFiles []string

const (
	// The environment variable prefix of all environment variables bound to our command line flags.
	// For example, --output is bound to CLICKHOUSE_DIAGNOSTIC_OUTPUT.
	envPrefix = "CLICKHOUSE_DIAGNOSTIC"
)
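
// Following the naming convention described above, a flag maps to an
// environment variable by upper-casing it and applying the prefix
// (illustrative):
//
//	--output -> CLICKHOUSE_DIAGNOSTIC_OUTPUT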

func init() {
	rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "d", false, "Enable debug mode")
	rootCmd.PersistentFlags().StringSliceVarP(&configFiles, "config", "f", []string{"clickhouse-diagnostics.yml", "/etc/clickhouse-diagnostics.yml"}, "Configuration file path")
	// set a usage template to ensure flags on root are listed as global
	rootCmd.SetUsageTemplate(`Usage:{{if .Runnable}}
  {{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
  {{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}

Aliases:
  {{.NameAndAliases}}{{end}}{{if .HasExample}}

Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}

Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
  {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}

Global Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}

Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
  {{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}

Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`)
	rootCmd.SetFlagErrorFunc(handleFlagErrors)
}

func Execute() {
	// logs go to stderr - stdout is exclusive for outputs e.g. tables
	output := zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: TimeFormat}
	// override the colors
	output.FormatLevel = func(i interface{}) string {
		var l string
		if ll, ok := i.(string); ok {
			switch ll {
			case zerolog.LevelTraceValue:
				l = colorize("TRC", colorMagenta)
			case zerolog.LevelDebugValue:
				l = colorize("DBG", colorMagenta)
			case zerolog.LevelInfoValue:
				l = colorize("INF", colorGreen)
			case zerolog.LevelWarnValue:
				l = colorize(colorize("WRN", colorYellow), colorBold)
			case zerolog.LevelErrorValue:
				l = colorize(colorize("ERR", colorRed), colorBold)
			case zerolog.LevelFatalValue:
				l = colorize(colorize("FTL", colorRed), colorBold)
			case zerolog.LevelPanicValue:
				l = colorize(colorize("PNC", colorRed), colorBold)
			default:
				l = colorize("???", colorBold)
			}
		} else {
			if i == nil {
				l = colorize("???", colorBold)
			} else {
				l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
			}
		}
		return l
	}
	output.FormatTimestamp = func(i interface{}) string {
		tt := i.(string)
		return colorize(tt, colorGreen)
	}
	log.Logger = log.Output(output)
	zerolog.SetGlobalLevel(zerolog.InfoLevel)
	rootCmd.SetHelpCommand(helpCmd)
	if err := rootCmd.Execute(); err != nil {
		// Send terminates the event so the fatal log is actually emitted
		// (and the process exits with a non-zero status)
		log.Fatal().Err(err).Send()
	}
}

// colorize returns the string s wrapped in ANSI code c
func colorize(s interface{}, c int) string {
	return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s)
}
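
// For instance, colorize("ERR", colorRed) produces "\x1b[31mERR\x1b[0m", and
// nesting it with colorBold wraps that in "\x1b[1m...\x1b[0m", which terminals
// render as bold red text.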

func handleFlagErrors(cmd *cobra.Command, err error) error {
	fmt.Println(colorize(colorize(fmt.Sprintf("Error: %s\n", err), colorRed), colorBold))
	_ = cmd.Help()
	os.Exit(1)
	return nil
}

func initializeConfig() error {
	// we use the first config file we find
	var configFile string
	for _, confFile := range configFiles {
		if ok, _ := utils.FileExists(confFile); ok {
			configFile = confFile
			break
		}
	}
	if configFile == "" {
		log.Warn().Msgf("config file in %s not found - config file will be ignored", configFiles)
		return nil
	}
	viper.SetConfigFile(configFile)
	if err := viper.ReadInConfig(); err != nil {
		return errors.Wrapf(err, "Unable to read configuration file at %s", configFile)
	}
	return nil
}
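
// Resolution order follows the defaults registered in init(): a
// clickhouse-diagnostics.yml in the working directory is preferred over
// /etc/clickhouse-diagnostics.yml, and an explicit --config path replaces
// both defaults (illustrative):
//
//	clickhouse-diagnostics --config ./my-diagnostics.yml collect
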
@ -1,24 +0,0 @@
package cmd

import (
	"fmt"

	"github.com/spf13/cobra"
)

var (
	Version = "" // set at compile time with -ldflags "-X github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd.Version=x.y.z"
	Commit  = ""
)

func init() {
	rootCmd.AddCommand(versionCmd)
}

var versionCmd = &cobra.Command{
	Use:   "version",
	Short: "Print the version number of clickhouse-diagnostics",
	Long:  `All software has versions. This is clickhouse-diagnostics`,
	Run: func(cmd *cobra.Command, args []string) {
		fmt.Printf("Clickhouse Diagnostics %s (%s)\n", Version, Commit)
	},
}
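
// Illustrative build invocation that stamps the variables above (module path
// taken from the go.mod below; the version and commit values are examples):
//
//	go build -ldflags "-X github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd.Version=1.0.0 -X github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd.Commit=abc1234"
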
@ -1,89 +0,0 @@
module github.com/ClickHouse/ClickHouse/programs/diagnostics

go 1.19

require (
	github.com/ClickHouse/clickhouse-go/v2 v2.0.12
	github.com/DATA-DOG/go-sqlmock v1.5.0
	github.com/Masterminds/semver v1.5.0
	github.com/bmatcuk/doublestar/v4 v4.0.2
	github.com/docker/go-connections v0.4.0
	github.com/elastic/gosigar v0.14.2
	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
	github.com/jaypipes/ghw v0.8.0
	github.com/matishsiao/goInfo v0.0.0-20210923090445-da2e3fa8d45f
	github.com/mholt/archiver/v4 v4.0.0-alpha.4
	github.com/olekukonko/tablewriter v0.0.5
	github.com/pkg/errors v0.9.1
	github.com/rs/zerolog v1.26.1
	github.com/spf13/cobra v1.3.0
	github.com/spf13/pflag v1.0.5
	github.com/spf13/viper v1.10.1
	github.com/stretchr/testify v1.8.1
	github.com/testcontainers/testcontainers-go v0.18.0
	github.com/yargevad/filepathx v1.0.0
	gopkg.in/yaml.v3 v3.0.1
)

require (
	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
	github.com/Microsoft/go-winio v0.5.2 // indirect
	github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
	github.com/andybalholm/brotli v1.0.4 // indirect
	github.com/cenkalti/backoff/v4 v4.2.0 // indirect
	github.com/containerd/containerd v1.6.17 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/distribution/distribution v2.8.2+incompatible // indirect
	github.com/docker/distribution v2.8.1+incompatible // indirect
	github.com/docker/docker v23.0.0+incompatible // indirect
	github.com/docker/go-units v0.5.0 // indirect
	github.com/dsnet/compress v0.0.1 // indirect
	github.com/fsnotify/fsnotify v1.5.4 // indirect
	github.com/ghodss/yaml v1.0.0 // indirect
	github.com/go-ole/go-ole v1.2.4 // indirect
	github.com/gogo/protobuf v1.3.2 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/golang/snappy v0.0.4 // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/hashicorp/hcl v1.0.0 // indirect
	github.com/inconshreveable/mousetrap v1.0.0 // indirect
	github.com/jaypipes/pcidb v0.6.0 // indirect
	github.com/klauspost/compress v1.13.6 // indirect
	github.com/klauspost/pgzip v1.2.5 // indirect
	github.com/kr/text v0.2.0 // indirect
	github.com/magiconair/properties v1.8.7 // indirect
	github.com/mattn/go-runewidth v0.0.9 // indirect
	github.com/mitchellh/go-homedir v1.1.0 // indirect
	github.com/mitchellh/mapstructure v1.4.3 // indirect
	github.com/moby/patternmatcher v0.5.0 // indirect
	github.com/moby/sys/sequential v0.5.0 // indirect
	github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f // indirect
	github.com/morikuni/aec v1.0.0 // indirect
	github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
	github.com/opencontainers/go-digest v1.0.0 // indirect
	github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
	github.com/opencontainers/runc v1.1.3 // indirect
	github.com/paulmach/orb v0.4.0 // indirect
	github.com/pelletier/go-toml v1.9.5 // indirect
	github.com/pierrec/lz4/v4 v4.1.14 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/shopspring/decimal v1.3.1 // indirect
	github.com/sirupsen/logrus v1.9.0 // indirect
	github.com/spf13/afero v1.8.0 // indirect
	github.com/spf13/cast v1.4.1 // indirect
	github.com/spf13/jwalterweatherman v1.1.0 // indirect
	github.com/subosito/gotenv v1.2.0 // indirect
	github.com/therootcompany/xz v1.0.1 // indirect
	github.com/ulikunitz/xz v0.5.10 // indirect
	go.opentelemetry.io/otel v1.4.1 // indirect
	go.opentelemetry.io/otel/trace v1.4.1 // indirect
	golang.org/x/net v0.0.0-20220906165146-f3363e06e74c // indirect
	golang.org/x/sys v0.5.0 // indirect
	golang.org/x/text v0.7.0 // indirect
	google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect
	google.golang.org/grpc v1.47.0 // indirect
	google.golang.org/protobuf v1.28.0 // indirect
	gopkg.in/ini.v1 v1.66.2 // indirect
	gopkg.in/yaml.v2 v2.4.0 // indirect
	howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
)
@ -1,992 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.5.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/ClickHouse/clickhouse-go/v2 v2.0.12 h1:Nbl/NZwoM6LGJm7smNBgvtdr/rxjlIssSW3eG/Nmb9E=
github.com/ClickHouse/clickhouse-go/v2 v2.0.12/go.mod h1:u4RoNQLLM2W6hNSPYrIESLJqaWSInZVmfM+MlaAhXcg=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/bmatcuk/doublestar/v4 v4.0.2 h1:X0krlUVAVmtr2cRoTqR8aDMrDqnB36ht8wpWTiQ3jsA=
github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.6.17 h1:XDnJIeJW0cLf6v7/+N+6L9kGrChHeXekZp2VHu6OpiY=
github.com/containerd/containerd v1.6.17/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw=
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/distribution v2.8.2+incompatible h1:k9+4DKdOG+quPFZXT/mUsiQrGu9vYCp+dXpuPkuqhk8=
github.com/distribution/distribution v2.8.2+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v23.0.0+incompatible h1:L6c28tNyqZ4/ub9AZC9d5QUuunoHHfEH4/Ue+h/E5nE=
github.com/docker/docker v23.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jaypipes/ghw v0.8.0 h1:02q1pTm9CD83vuhBsEZZhOCS128pq87uyaQeJZkp3sQ=
github.com/jaypipes/ghw v0.8.0/go.mod h1:+gR9bjm3W/HnFi90liF+Fj9GpCe/Dsibl9Im8KmC7c4=
github.com/jaypipes/pcidb v0.6.0 h1:VIM7GKVaW4qba30cvB67xSCgJPTzkG8Kzw/cbs5PHWU=
github.com/jaypipes/pcidb v0.6.0/go.mod h1:L2RGk04sfRhp5wvHO0gfRAMoLY/F3PKv/nwJeVoho0o=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/matishsiao/goInfo v0.0.0-20210923090445-da2e3fa8d45f h1:B0OD7nYl2FPQEVrw8g2uyc1lGEzNbvrKh7fspGZcbvY=
github.com/matishsiao/goInfo v0.0.0-20210923090445-da2e3fa8d45f/go.mod h1:aEt7p9Rvh67BYApmZwNDPpgircTO2kgdmDUoF/1QmwA=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mholt/archiver/v4 v4.0.0-alpha.4 h1:QJ4UuWgavPynEX3LXxClHDRGzYcgcvTtAMp8az7spuw=
github.com/mholt/archiver/v4 v4.0.0-alpha.4/go.mod h1:J7SYS/UTAtnO3I49RQEf+2FYZVwo7XBOh9Im43VrjNs=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM=
github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f h1:J/7hjLaHLD7epG0m6TBMGmp4NQ+ibBYLfeyJWdAIFLA=
github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
|
||||
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
|
||||
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk=
|
||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
|
||||
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
|
||||
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
|
||||
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
|
||||
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
|
||||
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
|
||||
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
github.com/paulmach/orb v0.4.0 h1:ilp1MQjRapLJ1+qcays1nZpe0mvkCY+b8JU/qBKRZ1A=
|
||||
github.com/paulmach/orb v0.4.0/go.mod h1:FkcWtplUAIVqAuhAOV2d3rpbnQyliDOjOcLW9dUrfdU=
|
||||
github.com/paulmach/protoscan v0.2.1-0.20210522164731-4e53c6875432/go.mod h1:2sV+uZ/oQh66m4XJVZm5iqUZ62BN88Ex1E+TTS0nLzI=
|
||||
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE=
|
||||
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
|
||||
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
|
||||
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
|
||||
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
|
||||
github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
|
||||
github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
|
||||
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
|
||||
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
|
||||
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
|
||||
github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
|
||||
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
|
||||
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
|
||||
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
|
||||
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
|
||||
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
|
||||
github.com/spf13/afero v1.8.0 h1:5MmtuhAgYeU6qpa7w7bP0dv6MBYuup0vekhSpSkoq60=
|
||||
github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
|
||||
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
|
||||
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
|
||||
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
|
||||
github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
|
||||
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
|
||||
github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
|
||||
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
|
||||
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
|
||||
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
|
||||
github.com/testcontainers/testcontainers-go v0.18.0 h1:8RXrcIQv5xX/uBOSmZd297gzvA7F0yuRA37/918o7Yg=
|
||||
github.com/testcontainers/testcontainers-go v0.18.0/go.mod h1:rLC7hR2SWRjJZZNrUYiTKvUXCziNxzZiYtz9icTWYNQ=
|
||||
github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
|
||||
github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
|
||||
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
|
||||
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
|
||||
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
|
||||
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
|
||||
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
|
||||
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
|
||||
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
|
||||
github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
|
||||
github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
|
||||
go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g=
|
||||
go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4=
|
||||
go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ=
|
||||
go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc=
|
||||
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo=
|
||||
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191220220014-0732a990476f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
|
||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
|
||||
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
|
||||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
|
||||
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
|
||||
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
|
||||
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
|
||||
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
|
||||
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
|
||||
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
|
||||
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
|
||||
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
|
||||
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
|
||||
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
|
||||
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
|
||||
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
|
||||
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
|
||||
google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
|
||||
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
|
||||
google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad h1:kqrS+lhvaMHCxul6sKQvKJ8nAAhlVItmZV822hYFH/U=
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI=
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
@ -1,113 +0,0 @@
package clickhouse

import (
    "fmt"
    "path/filepath"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
    "github.com/pkg/errors"
)

type ConfigCollector struct {
    resourceManager *platform.ResourceManager
}

func NewConfigCollector(m *platform.ResourceManager) *ConfigCollector {
    return &ConfigCollector{
        resourceManager: m,
    }
}

const DefaultConfigLocation = "/etc/clickhouse-server/"
const ProcessedConfigurationLocation = "/var/lib/clickhouse/preprocessed_configs"

func (c ConfigCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
    conf, err := conf.ValidateConfig(c.Configuration())
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    directory, err := config.ReadStringValue(conf, "directory")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }

    if directory != "" {
        // user has specified a directory - we therefore skip all other efforts to locate the config
        frame, errs := data.NewConfigFileFrame(directory)
        return &data.DiagnosticBundle{
            Frames: map[string]data.Frame{
                "user_specified": frame,
            },
            Errors: data.FrameErrors{Errors: errs},
        }, nil
    }
    configCandidates, err := FindConfigurationFiles()
    if err != nil {
        return &data.DiagnosticBundle{}, errors.Wrapf(err, "Unable to find configuration files")
    }
    frames := make(map[string]data.Frame)
    var frameErrors []error
    for frameName, confDir := range configCandidates {
        frame, errs := data.NewConfigFileFrame(confDir)
        frameErrors = append(frameErrors, errs...)
        frames[frameName] = frame
    }
    return &data.DiagnosticBundle{
        Frames: frames,
        Errors: data.FrameErrors{Errors: frameErrors},
    }, err
}

func FindConfigurationFiles() (map[string]string, error) {
    configCandidates := map[string]string{
        "default":      DefaultConfigLocation,
        "preprocessed": ProcessedConfigurationLocation,
    }
    // we don't know specifically where the config is, so try to find it via any running clickhouse processes
    processConfigs, err := utils.FindConfigsFromClickHouseProcesses()
    if err != nil {
        return nil, err
    }
    for i, path := range processConfigs {
        confDir := filepath.Dir(path)
        if len(processConfigs) == 1 {
            configCandidates["process"] = confDir
            break
        }
        configCandidates[fmt.Sprintf("process_%d", i)] = confDir
    }
    return configCandidates, nil
}

func (c ConfigCollector) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{
            config.StringParam{
                Value:      "",
                Param:      config.NewParam("directory", "Specify the location of the configuration files for ClickHouse Server e.g. /etc/clickhouse-server/", false),
                AllowEmpty: true,
            },
        },
    }
}

func (c ConfigCollector) Description() string {
    return "Collects the ClickHouse configuration from the local filesystem."
}

func (c ConfigCollector) IsDefault() bool {
    return true
}

// here we register the collector for use
func init() {
    collectors.Register("config", func() (collectors.Collector, error) {
        return &ConfigCollector{
            resourceManager: platform.GetResourceManager(),
        }, nil
    })
}
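
A minimal, self-contained sketch of the register-by-name pattern the init() above relies on - the registry map and lookup here are illustrative stand-ins, not the actual collectors package API:

package main

import "fmt"

// Collector is a trimmed-down stand-in for the collectors.Collector interface.
type Collector interface {
    Description() string
}

// registry maps a collector name to its factory, mirroring collectors.Register.
var registry = map[string]func() (Collector, error){}

func Register(name string, factory func() (Collector, error)) {
    registry[name] = factory
}

type demoCollector struct{}

func (demoCollector) Description() string { return "demo collector - collects nothing" }

func main() {
    Register("config", func() (Collector, error) { return demoCollector{}, nil })
    // A diagnostics runner would resolve the collector by name and invoke it.
    factory, ok := registry["config"]
    if !ok {
        panic("collector not registered")
    }
    c, err := factory()
    if err != nil {
        panic(err)
    }
    fmt.Println(c.Description())
}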
@ -1,128 +0,0 @@
package clickhouse_test

import (
    "encoding/xml"
    "fmt"
    "io"
    "os"
    "path"
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/stretchr/testify/require"
)

func TestConfigConfiguration(t *testing.T) {
    t.Run("correct configuration is returned for config collector", func(t *testing.T) {
        configCollector := clickhouse.NewConfigCollector(&platform.ResourceManager{})
        conf := configCollector.Configuration()
        require.Len(t, conf.Params, 1)
        // check first param
        require.IsType(t, config.StringParam{}, conf.Params[0])
        directory, ok := conf.Params[0].(config.StringParam)
        require.True(t, ok)
        require.False(t, directory.Required())
        require.Equal(t, directory.Name(), "directory")
        require.Equal(t, "", directory.Value)
    })
}

func TestConfigCollect(t *testing.T) {
    configCollector := clickhouse.NewConfigCollector(&platform.ResourceManager{})

    t.Run("test default file collector configuration", func(t *testing.T) {
        diagSet, err := configCollector.Collect(config.Configuration{})
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        // we won't be able to collect the default configs (preprocessed and default) - even if clickhouse is installed,
        // these directories should not be readable under the permissions with which these tests are realistically executed
        // note: we may also pick up configs from a local clickhouse process - we thus allow a len >= 2 but don't check this
        // as it's non-deterministic
        require.GreaterOrEqual(t, len(diagSet.Frames), 2)
        // check default key
        require.Contains(t, diagSet.Frames, "default")
        require.Equal(t, diagSet.Frames["default"].Name(), "/etc/clickhouse-server/")
        require.Equal(t, diagSet.Frames["default"].Columns(), []string{"config"})
        // collection will have failed
        checkFrame(t, diagSet.Frames["default"], nil)
        // check preprocessed key
        require.Contains(t, diagSet.Frames, "preprocessed")
        require.Equal(t, diagSet.Frames["preprocessed"].Name(), "/var/lib/clickhouse/preprocessed_configs")
        require.Equal(t, diagSet.Frames["preprocessed"].Columns(), []string{"config"})
        // min of 2 - might be more if a local installation of clickhouse is running
        require.GreaterOrEqual(t, len(diagSet.Errors.Errors), 2)
    })

    t.Run("test configuration when specified", func(t *testing.T) {
        // create some test files
        tempDir := t.TempDir()
        confDir := path.Join(tempDir, "conf")
        // create an includes file
        includesDir := path.Join(tempDir, "includes")
        err := os.MkdirAll(includesDir, os.ModePerm)
        require.Nil(t, err)
        includesPath := path.Join(includesDir, "random.xml")
        includesFile, err := os.Create(includesPath)
        require.Nil(t, err)
        xmlWriter := io.Writer(includesFile)
        enc := xml.NewEncoder(xmlWriter)
        enc.Indent(" ", " ")
        xmlConfig := data.XmlConfig{
            XMLName: xml.Name{},
            Clickhouse: data.XmlLoggerConfig{
                XMLName:  xml.Name{},
                ErrorLog: "/var/log/clickhouse-server/clickhouse-server.err.log",
                Log:      "/var/log/clickhouse-server/clickhouse-server.log",
            },
            IncludeFrom: "",
        }
        err = enc.Encode(xmlConfig)
        require.Nil(t, err)
        // create 5 temporary config files - the rows slice has length 6 to account for the included file
        rows := make([][]interface{}, 6)
        for i := 0; i < 5; i++ {
            if i == 4 {
                // set the includes for the last doc
                xmlConfig.IncludeFrom = includesPath
            }
            // we want to check hierarchies are walked so create a simple folder for each file
            fileDir := path.Join(confDir, fmt.Sprintf("%d", i))
            err := os.MkdirAll(fileDir, os.ModePerm)
            require.Nil(t, err)
            filepath := path.Join(fileDir, fmt.Sprintf("random-%d.xml", i))
            row := make([]interface{}, 1)
            row[0] = data.XmlConfigFile{Path: filepath}
            rows[i] = row
            xmlFile, err := os.Create(filepath)
            require.Nil(t, err)
            // write a little xml so it's valid
            xmlConfig := xmlConfig
            xmlWriter := io.Writer(xmlFile)
            enc := xml.NewEncoder(xmlWriter)
            enc.Indent(" ", " ")
            err = enc.Encode(xmlConfig)
            require.Nil(t, err)
        }
        diagSet, err := configCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value: confDir,
                    Param: config.NewParam("directory", "File locations", false),
                },
            },
        })
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Frames, 1)
        require.Contains(t, diagSet.Frames, "user_specified")
        require.Equal(t, diagSet.Frames["user_specified"].Name(), confDir)
        require.Equal(t, diagSet.Frames["user_specified"].Columns(), []string{"config"})
        iConf := make([]interface{}, 1)
        iConf[0] = data.XmlConfigFile{Path: includesPath, Included: true}
        rows[5] = iConf
        checkFrame(t, diagSet.Frames["user_specified"], rows)
    })
}
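
The test above round-trips a config through encoding/xml; a standalone sketch of what such an encoding produces follows. The struct tags below are assumptions chosen to resemble a ClickHouse server config, not the actual data.XmlConfig / data.XmlLoggerConfig definitions:

package main

import (
    "encoding/xml"
    "os"
)

// logger and serverConfig are trimmed-down, hypothetical stand-ins for
// data.XmlLoggerConfig and data.XmlConfig.
type logger struct {
    ErrorLog string `xml:"errorlog"`
    Log      string `xml:"log"`
}

type serverConfig struct {
    XMLName     xml.Name `xml:"clickhouse"`
    Logger      logger   `xml:"logger"`
    IncludeFrom string   `xml:"include_from,omitempty"`
}

func main() {
    enc := xml.NewEncoder(os.Stdout)
    enc.Indent("", "  ")
    // emits <clickhouse><logger><errorlog>...</errorlog><log>...</log></logger></clickhouse>
    _ = enc.Encode(serverConfig{
        Logger: logger{
            ErrorLog: "/var/log/clickhouse-server/clickhouse-server.err.log",
            Log:      "/var/log/clickhouse-server/clickhouse-server.log",
        },
    })
}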
@ -1,108 +0,0 @@
package clickhouse

import (
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/pkg/errors"
)

type DBLogTable struct {
    orderBy        data.OrderBy
    excludeColumns []string
}

var DbLogTables = map[string]DBLogTable{
    "query_log": {
        orderBy: data.OrderBy{
            Column: "event_time_microseconds",
            Order:  data.Asc,
        },
        excludeColumns: []string{},
    },
    "query_thread_log": {
        orderBy: data.OrderBy{
            Column: "event_time_microseconds",
            Order:  data.Asc,
        },
        excludeColumns: []string{},
    },
    "text_log": {
        orderBy: data.OrderBy{
            Column: "event_time_microseconds",
            Order:  data.Asc,
        },
        excludeColumns: []string{},
    },
}

// This collector collects db logs from the system log tables listed in DbLogTables
// (query_log, query_thread_log and text_log)

type DBLogsCollector struct {
    resourceManager *platform.ResourceManager
}

func NewDBLogsCollector(m *platform.ResourceManager) *DBLogsCollector {
    return &DBLogsCollector{
        resourceManager: m,
    }
}

func (dc *DBLogsCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
    conf, err := conf.ValidateConfig(dc.Configuration())
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    rowLimit, err := config.ReadIntValue(conf, "row_limit")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }

    frames := make(map[string]data.Frame)
    var frameErrors []error
    for logTable, tableConfig := range DbLogTables {
        frame, err := dc.resourceManager.DbClient.ReadTable("system", logTable, tableConfig.excludeColumns, tableConfig.orderBy, rowLimit)
        if err != nil {
            frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to collect %s", logTable))
        } else {
            frames[logTable] = frame
        }
    }

    fErrors := data.FrameErrors{
        Errors: frameErrors,
    }
    return &data.DiagnosticBundle{
        Frames: frames,
        Errors: fErrors,
    }, nil
}

func (dc *DBLogsCollector) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{
            config.IntParam{
                Value: 100000,
                Param: config.NewParam("row_limit", "Maximum number of log rows to collect. Negative values mean unlimited", false),
            },
        },
    }
}

func (dc *DBLogsCollector) IsDefault() bool {
    return true
}

func (dc DBLogsCollector) Description() string {
    return "Collects the ClickHouse logs directly from the database."
}

// here we register the collector for use
func init() {
    collectors.Register("db_logs", func() (collectors.Collector, error) {
        return &DBLogsCollector{
            resourceManager: platform.GetResourceManager(),
        }, nil
    })
}
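
Given the row_limit parameter and per-table OrderBy above, ReadTable evidently issues queries of the shape exercised by the fake-client expectations in the tests that follow. A small sketch of assembling that SQL (buildReadTableQuery is a hypothetical helper, not part of the DbClient API):

package main

import "fmt"

// buildReadTableQuery sketches the query shape seen in the test expectations,
// e.g. "SELECT * FROM system.query_log ORDER BY event_time_microseconds ASC LIMIT 100000".
func buildReadTableQuery(database, table, orderByColumn string, limit int64) string {
    query := fmt.Sprintf("SELECT * FROM %s.%s ORDER BY %s ASC", database, table, orderByColumn)
    if limit >= 0 {
        // negative row_limit values mean unlimited, per the parameter description above
        query = fmt.Sprintf("%s LIMIT %d", query, limit)
    }
    return query
}

func main() {
    fmt.Println(buildReadTableQuery("system", "query_log", "event_time_microseconds", 100000))
}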
@ -1,119 +0,0 @@
package clickhouse_test

import (
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
    "github.com/stretchr/testify/require"
)

func TestDbLogsConfiguration(t *testing.T) {
    t.Run("correct configuration is returned for db logs collector", func(t *testing.T) {
        client := test.NewFakeClickhouseClient(make(map[string][]string))
        dbLogsCollector := clickhouse.NewDBLogsCollector(&platform.ResourceManager{
            DbClient: client,
        })
        conf := dbLogsCollector.Configuration()
        require.Len(t, conf.Params, 1)
        require.IsType(t, config.IntParam{}, conf.Params[0])
        rowLimit, ok := conf.Params[0].(config.IntParam)
        require.True(t, ok)
        require.False(t, rowLimit.Required())
        require.Equal(t, rowLimit.Name(), "row_limit")
        require.Equal(t, int64(100000), rowLimit.Value)
    })
}

func TestDbLogsCollect(t *testing.T) {
    client := test.NewFakeClickhouseClient(make(map[string][]string))
    dbLogsCollector := clickhouse.NewDBLogsCollector(&platform.ResourceManager{
        DbClient: client,
    })
    queryLogColumns := []string{"type", "event_date", "event_time", "event_time_microseconds",
        "query_start_time", "query_start_time_microseconds", "query_duration_ms", "read_rows", "read_bytes", "written_rows", "written_bytes",
        "result_rows", "result_bytes", "memory_usage", "current_database", "query", "formatted_query", "normalized_query_hash",
        "query_kind", "databases", "tables", "columns", "projections", "views", "exception_code", "exception", "stack_trace",
        "is_initial_query", "user", "query_id", "address", "port", "initial_user", "initial_query_id", "initial_address", "initial_port",
        "initial_query_start_time", "initial_query_start_time_microseconds", "interface", "os_user", "client_hostname", "client_name",
        "client_revision", "client_version_major", "client_version_minor", "client_version_patch", "http_method", "http_user_agent",
        "http_referer", "forwarded_for", "quota_key", "revision", "log_comment", "thread_ids", "ProfileEvents", "Settings",
        "used_aggregate_functions", "used_aggregate_function_combinators", "used_database_engines", "used_data_type_families",
        "used_dictionaries", "used_formats", "used_functions", "used_storages", "used_table_functions"}
    queryLogFrame := test.NewFakeDataFrame("queryLog", queryLogColumns,
        [][]interface{}{
{"QueryStart", "2021-12-13", "2021-12-13 12:53:20", "2021-12-13 12:53:20.590579", "2021-12-13 12:53:20", "2021-12-13 12:53:20.590579", "0", "0", "0", "0", "0", "0", "0", "0", "default", "SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)", "", "6666026786019643712", "Select", "['system']", "['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']", "['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']", "[]", "[]", "0", "", "", "1", "default", "3b5feb6d-3086-4718-adb2-17464988ff12", "::ffff:127.0.0.1", "50920", "default", "3b5feb6d-3086-4718-adb2-17464988ff12", "::ffff:127.0.0.1", "50920", "2021-12-13 12:53:30", "2021-12-13 12:53:30.590579", "1", "", "", "ClickHouse client", "54450", "21", "11", "0", "0", "", "", "", "", "54456", "", "[]", "{}", "{'load_balancing':'random','max_memory_usage':'10000000000'}", "[]", "[]", "[]", "[]", "[]", "[]", "[]", "[]", "[]"},
{"QueryFinish", "2021-12-13", "2021-12-13 12:53:30", "2021-12-13 12:53:30.607292", "2021-12-13 12:53:30", "2021-12-13 12:53:30.590579", "15", "4512", "255694", "0", "0", "4358", "173248", "4415230", "default", "SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)", "", "6666026786019643712", "Select", "['system']", "['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']", "['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']", "[]", "[]", "0", "", "", "1", "default", "3b5feb6d-3086-4718-adb2-17464988ff12", "::ffff:127.0.0.1", "50920", "default", "3b5feb6d-3086-4718-adb2-17464988ff12", "::ffff:127.0.0.1", "50920", "2021-12-13 12:53:30", "2021-12-13 12:53:30.590579", "1", "", "", "ClickHouse client", "54450", "21", "11", "0", "0", "", "", "", "", "54456", "", "[95298,95315,95587,95316,95312,95589,95318,95586,95588,95585]", "{'Query':1,'SelectQuery':1,'ArenaAllocChunks':41,'ArenaAllocBytes':401408,'FunctionExecute':62,'NetworkSendElapsedMicroseconds':463,'NetworkSendBytes':88452,'SelectedRows':4512,'SelectedBytes':255694,'RegexpCreated':6,'ContextLock':411,'RWLockAcquiredReadLocks':190,'RealTimeMicroseconds':49221,'UserTimeMicroseconds':19811,'SystemTimeMicroseconds':2817,'SoftPageFaults':1128,'OSCPUWaitMicroseconds':127,'OSCPUVirtualTimeMicroseconds':22624,'OSWriteBytes':12288,'OSWriteChars':13312}", "{'load_balancing':'random','max_memory_usage':'10000000000'}", "[]", "[]", "[]", "[]", "[]", "[]", "['concat','notEmpty','extractAll']", "[]", "[]"},
{"QueryStart", "2021-12-13", "2021-12-13 13:02:53", "2021-12-13 13:02:53.419528", "2021-12-13 13:02:53", "2021-12-13 13:02:53.419528", "0", "0", "0", "0", "0", "0", "0", "0", "default", "SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)", "", "6666026786019643712", "Select", "['system']", "['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']", "['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']", "[]", "[]", "0", "", "", "1", "default", "351b58e4-6128-47d4-a7b8-03d78c1f84c6", "::ffff:127.0.0.1", "50968", "default", "351b58e4-6128-47d4-a7b8-03d78c1f84c6", "::ffff:127.0.0.1", "50968", "2021-12-13 13:02:53", "2021-12-13 13:02:53.419528", "1", "", "", "ClickHouse client", "54450", "21", "11", "0", "0", "", "", "", "", "54456", "", "[]", "{}", "{'load_balancing':'random','max_memory_usage':'10000000000'}", "[]", "[]", "[]", "[]", "[]", "[]", "[]", "[]", "[]"},
{"QueryFinish", "2021-12-13", "2021-12-13 13:02:56", "2021-12-13 13:02:56.437115", "2021-12-13 13:02:56", "2021-12-13 13:02:56.419528", "16", "4629", "258376", "0", "0", "4377", "174272", "4404694", "default", "SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)", "", "6666026786019643712", "Select", "['system']", "['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']", "['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']", "[]", "[]", "0", "", "", "1", "default", "351b58e4-6128-47d4-a7b8-03d78c1f84c6", "::ffff:127.0.0.1", "50968", "default", "351b58e4-6128-47d4-a7b8-03d78c1f84c6", "::ffff:127.0.0.1", "50968", "2021-12-13 13:02:53", "2021-12-13 13:02:53.419528", "1", "", "", "ClickHouse client", "54450", "21", "11", "0", "0", "", "", "", "", "54456", "", "[95298,95318,95315,95316,95312,95588,95589,95586,95585,95587]", "{'Query':1,'SelectQuery':1,'ArenaAllocChunks':41,'ArenaAllocBytes':401408,'FunctionExecute':62,'NetworkSendElapsedMicroseconds':740,'NetworkSendBytes':88794,'SelectedRows':4629,'SelectedBytes':258376,'ContextLock':411,'RWLockAcquiredReadLocks':194,'RealTimeMicroseconds':52469,'UserTimeMicroseconds':17179,'SystemTimeMicroseconds':4218,'SoftPageFaults':569,'OSCPUWaitMicroseconds':303,'OSCPUVirtualTimeMicroseconds':25087,'OSWriteBytes':12288,'OSWriteChars':12288}", "{'load_balancing':'random','max_memory_usage':'10000000000'}", "[]", "[]", "[]", "[]", "[]", "[]", "['concat','notEmpty','extractAll']", "[]", "[]"},
        })

    client.QueryResponses["SELECT * FROM system.query_log ORDER BY event_time_microseconds ASC LIMIT 100000"] = &queryLogFrame

    textLogColumns := []string{"event_date", "event_time", "event_time_microseconds", "microseconds", "thread_name", "thread_id", "level", "query_id", "logger_name", "message", "revision", "source_file", "source_line"}
    textLogFrame := test.NewFakeDataFrame("textLog", textLogColumns,
        [][]interface{}{
            {"2022-02-03", "2022-02-03 16:17:47", "2022-02-03 16:37:17.056950", "56950", "clickhouse-serv", "68947", "Information", "", "DNSCacheUpdater", "Update period 15 seconds", "54458", "../src/Interpreters/DNSCacheUpdater.cpp; void DB::DNSCacheUpdater::start()", "46"},
            {"2022-02-03", "2022-02-03 16:27:47", "2022-02-03 16:37:27.057022", "57022", "clickhouse-serv", "68947", "Information", "", "Application", "Available RAM: 62.24 GiB; physical cores: 8; logical cores: 16.", "54458", "../programs/server/Server.cpp; virtual int DB::Server::main(const std::vector<std::string> &)", "1380"},
            {"2022-02-03", "2022-02-03 16:37:47", "2022-02-03 16:37:37.057484", "57484", "clickhouse-serv", "68947", "Information", "", "Application", "Listening for http://[::1]:8123", "54458", "../programs/server/Server.cpp; virtual int DB::Server::main(const std::vector<std::string> &)", "1444"},
            {"2022-02-03", "2022-02-03 16:47:47", "2022-02-03 16:37:47.057527", "57527", "clickhouse-serv", "68947", "Information", "", "Application", "Listening for native protocol (tcp): [::1]:9000", "54458", "../programs/server/Server.cpp; virtual int DB::Server::main(const std::vector<std::string> &)", "1444"},
        })

    client.QueryResponses["SELECT * FROM system.text_log ORDER BY event_time_microseconds ASC LIMIT 100000"] = &textLogFrame

    // skip query_thread_log frame - often it doesn't exist anyway unless enabled
    t.Run("test default db logs collection", func(t *testing.T) {
        bundle, errs := dbLogsCollector.Collect(config.Configuration{})
        require.Empty(t, errs)
        require.NotNil(t, bundle)
        require.Len(t, bundle.Frames, 2)
        require.Contains(t, bundle.Frames, "text_log")
        require.Contains(t, bundle.Frames, "query_log")
        require.Len(t, bundle.Errors.Errors, 1)
        // check query_log frame
        require.Contains(t, bundle.Frames, "query_log")
        require.Equal(t, queryLogColumns, bundle.Frames["query_log"].Columns())
        checkFrame(t, bundle.Frames["query_log"], queryLogFrame.Rows)
        // check text_log frame
        require.Contains(t, bundle.Frames, "text_log")
        require.Equal(t, textLogColumns, bundle.Frames["text_log"].Columns())
        checkFrame(t, bundle.Frames["text_log"], textLogFrame.Rows)
        client.Reset()
    })

    t.Run("test db logs collection with limit", func(t *testing.T) {
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.IntParam{
                    Value: 1,
                    Param: config.NewParam("row_limit", "Maximum number of log rows to collect. Negative values mean unlimited", false),
                },
            },
        }
        bundle, err := dbLogsCollector.Collect(conf)
        require.Empty(t, err)
        require.NotNil(t, bundle)
        require.Len(t, bundle.Frames, 0)
        require.Len(t, bundle.Errors.Errors, 3)
        // populate client
        client.QueryResponses["SELECT * FROM system.query_log ORDER BY event_time_microseconds ASC LIMIT 1"] = &queryLogFrame
        client.QueryResponses["SELECT * FROM system.text_log ORDER BY event_time_microseconds ASC LIMIT 1"] = &textLogFrame
        bundle, err = dbLogsCollector.Collect(conf)
        require.Empty(t, err)
        require.Len(t, bundle.Frames, 2)
        require.Len(t, bundle.Errors.Errors, 1)
        require.Contains(t, bundle.Frames, "text_log")
        require.Contains(t, bundle.Frames, "query_log")
        // check query_log frame
        require.Contains(t, bundle.Frames, "query_log")
        require.Equal(t, queryLogColumns, bundle.Frames["query_log"].Columns())
        checkFrame(t, bundle.Frames["query_log"], queryLogFrame.Rows[:1])
        // check text_log frame
        require.Contains(t, bundle.Frames, "text_log")
        require.Equal(t, textLogColumns, bundle.Frames["text_log"].Columns())
        checkFrame(t, bundle.Frames["text_log"], textLogFrame.Rows[:1])
        client.Reset()
    })
}
@ -1,140 +0,0 @@
package clickhouse

import (
    "fmt"
    "path/filepath"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
)

// This collector collects the ClickHouse server log files from the local filesystem

type LogsCollector struct {
    resourceManager *platform.ResourceManager
}

func NewLogsCollector(m *platform.ResourceManager) *LogsCollector {
    return &LogsCollector{
        resourceManager: m,
    }
}

var DefaultLogsLocation = filepath.Clean("/var/log/clickhouse-server/")

func (lc *LogsCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
    conf, err := conf.ValidateConfig(lc.Configuration())
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    directory, err := config.ReadStringValue(conf, "directory")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    collectArchives, err := config.ReadBoolValue(conf, "collect_archives")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    logPatterns := []string{"*.log"}
    if collectArchives {
        logPatterns = append(logPatterns, "*.gz")
    }

    if directory != "" {
        // user has specified a directory - we therefore skip all other efforts to locate the logs
        frame, errs := data.NewFileDirectoryFrame(directory, logPatterns)
        return &data.DiagnosticBundle{
            Frames: map[string]data.Frame{
                "user_specified": frame,
            },
            Errors: data.FrameErrors{Errors: errs},
        }, nil
    }
    // add the default location first
    frames := make(map[string]data.Frame)
    dirFrame, frameErrors := data.NewFileDirectoryFrame(DefaultLogsLocation, logPatterns)
    frames["default"] = dirFrame
    logFolders, errs := FindLogFileCandidates()
    frameErrors = append(frameErrors, errs...)
    i := 0
    for folder, paths := range logFolders {
        // we will collect the default location anyway above so skip these
        if folder != DefaultLogsLocation {
            if collectArchives {
                paths = append(paths, "*.gz")
            }
            dirFrame, errs := data.NewFileDirectoryFrame(folder, paths)
            frames[fmt.Sprintf("logs-%d", i)] = dirFrame
            frameErrors = append(frameErrors, errs...)
            // advance the index so successive folders get distinct frame names rather than all overwriting "logs-0"
            i++
        }
    }
    return &data.DiagnosticBundle{
        Frames: frames,
        Errors: data.FrameErrors{Errors: frameErrors},
    }, err
}

func (lc *LogsCollector) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{
            config.StringParam{
                Value:      "",
                Param:      config.NewParam("directory", "Specify the location of the log files for ClickHouse Server e.g. /var/log/clickhouse-server/", false),
                AllowEmpty: true,
            },
            config.BoolParam{
                Param: config.NewParam("collect_archives", "Collect compressed log archive files", false),
            },
        },
    }
}

func FindLogFileCandidates() (logFolders map[string][]string, configErrors []error) {
    // we need the config to determine the location of the logs
    configCandidates := make(map[string]data.ConfigFileFrame)
    configFiles, err := FindConfigurationFiles()
    logFolders = make(map[string][]string)
    if err != nil {
        configErrors = append(configErrors, err)
        return logFolders, configErrors
    }
    for _, folder := range configFiles {
        configFrame, errs := data.NewConfigFileFrame(folder)
        configErrors = append(configErrors, errs...)
        configCandidates[filepath.Clean(folder)] = configFrame
    }

    for _, config := range configCandidates {
        paths, errs := config.FindLogPaths()
        for _, path := range paths {
            folder := filepath.Dir(path)
            filename := filepath.Base(path)
            if _, ok := logFolders[folder]; !ok {
                logFolders[folder] = []string{}
            }
            logFolders[folder] = utils.Unique(append(logFolders[folder], filename))
        }
        configErrors = append(configErrors, errs...)
    }
    return logFolders, configErrors
}

func (lc *LogsCollector) IsDefault() bool {
    return true
}

func (lc LogsCollector) Description() string {
    return "Collects the ClickHouse log files from the local filesystem."
}

// here we register the collector for use
func init() {
    collectors.Register("logs", func() (collectors.Collector, error) {
        return &LogsCollector{
            resourceManager: platform.GetResourceManager(),
        }, nil
    })
}
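
FindLogFileCandidates above groups log paths by directory so each folder is collected once with a de-duplicated filename list. A compact, standalone illustration of just that grouping step, using only the standard library:

package main

import (
    "fmt"
    "path/filepath"
)

// groupByFolder mirrors the folder -> unique-filenames grouping performed in
// FindLogFileCandidates, without the config discovery around it.
func groupByFolder(paths []string) map[string][]string {
    folders := make(map[string][]string)
    seen := make(map[string]map[string]bool)
    for _, p := range paths {
        folder := filepath.Dir(p)
        name := filepath.Base(p)
        if seen[folder] == nil {
            seen[folder] = make(map[string]bool)
        }
        if !seen[folder][name] {
            seen[folder][name] = true
            folders[folder] = append(folders[folder], name)
        }
    }
    return folders
}

func main() {
    paths := []string{
        "/var/log/clickhouse-server/clickhouse-server.log",
        "/var/log/clickhouse-server/clickhouse-server.err.log",
        "/var/log/clickhouse-server/clickhouse-server.log", // duplicate collapses to one entry
    }
    // prints: map[/var/log/clickhouse-server:[clickhouse-server.log clickhouse-server.err.log]]
    fmt.Println(groupByFolder(paths))
}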
@ -1,147 +0,0 @@
package clickhouse_test

import (
    "fmt"
    "os"
    "path"
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
    "github.com/stretchr/testify/require"
)

func TestLogsConfiguration(t *testing.T) {
    t.Run("correct configuration is returned for logs collector", func(t *testing.T) {
        client := test.NewFakeClickhouseClient(make(map[string][]string))
        logsCollector := clickhouse.NewLogsCollector(&platform.ResourceManager{
            DbClient: client,
        })
        conf := logsCollector.Configuration()
        require.Len(t, conf.Params, 2)
        // check directory
        require.IsType(t, config.StringParam{}, conf.Params[0])
        directory, ok := conf.Params[0].(config.StringParam)
        require.True(t, ok)
        require.False(t, directory.Required())
        require.Equal(t, directory.Name(), "directory")
        require.Empty(t, directory.Value)
        // check collect_archives
        require.IsType(t, config.BoolParam{}, conf.Params[1])
        collectArchives, ok := conf.Params[1].(config.BoolParam)
        require.True(t, ok)
        require.False(t, collectArchives.Required())
        require.Equal(t, collectArchives.Name(), "collect_archives")
        require.False(t, collectArchives.Value)
    })
}

func TestLogsCollect(t *testing.T) {

    logsCollector := clickhouse.NewLogsCollector(&platform.ResourceManager{})

    t.Run("test default logs collection", func(t *testing.T) {
        // we can't rely on a local installation of clickhouse being present for tests - if it is present (and running)
        // results may be variable, e.g. we may find a config. For now, we allow flexibility and test only the default.
        // TODO: we may want to test this within a container
        bundle, err := logsCollector.Collect(config.Configuration{})
        require.Nil(t, err)
        require.NotNil(t, bundle)
        // we will have some errors whether clickhouse is installed or not: if the former, permission issues; if the latter, missing folders
        require.Greater(t, len(bundle.Errors.Errors), 0)
        require.Len(t, bundle.Frames, 1)
        require.Contains(t, bundle.Frames, "default")
        _, ok := bundle.Frames["default"].(data.DirectoryFileFrame)
        require.True(t, ok)
        // no guarantees clickhouse is installed, so the default frame may contain no files
    })

    t.Run("test logs collection when directory is specified", func(t *testing.T) {
        cwd, err := os.Getwd()
        require.Nil(t, err)
        logsPath := path.Join(cwd, "../../../testdata", "logs", "var", "logs")
        bundle, err := logsCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value:      logsPath,
                    Param:      config.NewParam("directory", "Specify the location of the log files for ClickHouse Server e.g. /var/log/clickhouse-server/", false),
                    AllowEmpty: true,
                },
            },
        })
        require.Nil(t, err)
        checkDirectoryBundle(t, bundle, logsPath, []string{"clickhouse-server.log", "clickhouse-server.err.log"})

    })

    t.Run("test logs collection of archives", func(t *testing.T) {
        cwd, err := os.Getwd()
        require.Nil(t, err)
        logsPath := path.Join(cwd, "../../../testdata", "logs", "var", "logs")
        bundle, err := logsCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value:      logsPath,
                    Param:      config.NewParam("directory", "Specify the location of the log files for ClickHouse Server e.g. /var/log/clickhouse-server/", false),
                    AllowEmpty: true,
                },
                config.BoolParam{
                    Value: true,
                    Param: config.NewParam("collect_archives", "Collect compressed log archive files", false),
                },
            },
        })
        require.Nil(t, err)
        checkDirectoryBundle(t, bundle, logsPath, []string{"clickhouse-server.log", "clickhouse-server.err.log", "clickhouse-server.log.gz"})
    })

    t.Run("test when directory does not exist", func(t *testing.T) {
        tmpDir := t.TempDir()
        logsPath := path.Join(tmpDir, "random")
        bundle, err := logsCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value:      logsPath,
                    Param:      config.NewParam("directory", "Specify the location of the log files for ClickHouse Server e.g. /var/log/clickhouse-server/", false),
                    AllowEmpty: true,
                },
            },
        })
        // not a fatal error currently
        require.Nil(t, err)
        require.Len(t, bundle.Errors.Errors, 1)
        require.Equal(t, fmt.Sprintf("directory %s does not exist", logsPath), bundle.Errors.Errors[0].Error())
    })
}

func checkDirectoryBundle(t *testing.T, bundle *data.DiagnosticBundle, logsPath string, expectedFiles []string) {
    require.NotNil(t, bundle)
    require.Nil(t, bundle.Errors.Errors)
    require.Len(t, bundle.Frames, 1)
    require.Contains(t, bundle.Frames, "user_specified")
    dirFrame, ok := bundle.Frames["user_specified"].(data.DirectoryFileFrame)
    require.True(t, ok)
    require.Equal(t, logsPath, dirFrame.Directory)
    require.Equal(t, []string{"files"}, dirFrame.Columns())
    fullPaths := make([]string, len(expectedFiles))
    for i, filePath := range expectedFiles {
        fullPaths[i] = path.Join(logsPath, filePath)
    }
    // count the rows returned by the frame
    i := 0
    for {
        values, ok, err := dirFrame.Next()
        require.Nil(t, err)
        if !ok {
            break
        }
        require.Len(t, values, 1)
        file, ok := values[0].(data.SimpleFile)
        require.True(t, ok)
        require.Contains(t, fullPaths, file.FilePath())
        i += 1
    }
    require.Equal(t, len(fullPaths), i)
}
@ -1,153 +0,0 @@
{
  "queries": {
    "version": [
      {
        "statement": "SELECT version()"
      }
    ],
    "databases": [
      {
        "statement": "SELECT name, engine, tables, partitions, parts, formatReadableSize(bytes_on_disk) \"disk_size\" FROM system.databases db LEFT JOIN ( SELECT database, uniq(table) \"tables\", uniq(table, partition) \"partitions\", count() AS parts, sum(bytes_on_disk) \"bytes_on_disk\" FROM system.parts WHERE active GROUP BY database ) AS db_stats ON db.name = db_stats.database ORDER BY bytes_on_disk DESC LIMIT {{.Limit}}"
      }
    ],
    "access": [
      {
        "statement": "SHOW ACCESS"
      }
    ],
    "quotas": [
      {
        "statement": "SHOW QUOTA"
      }
    ],
    "db_engines": [
      {
        "statement": "SELECT engine, count() \"count\" FROM system.databases GROUP BY engine"
      }
    ],
    "table_engines": [
      {
        "statement": "SELECT engine, count() \"count\" FROM system.tables WHERE database != 'system' GROUP BY engine"
      }
    ],
    "dictionaries": [
      {
        "statement": "SELECT source, type, status, count() \"count\" FROM system.dictionaries GROUP BY source, type, status ORDER BY status DESC, source"
      }
    ],
    "replicated_tables_by_delay": [
      {
        "statement": "SELECT database, table, is_leader, is_readonly, absolute_delay, queue_size, inserts_in_queue, merges_in_queue FROM system.replicas ORDER BY absolute_delay DESC LIMIT {{.Limit}}"
      }
    ],
    "replication_queue_by_oldest": [
      {
        "statement": "SELECT database, table, replica_name, position, node_name, type, source_replica, parts_to_merge, new_part_name, create_time, required_quorum, is_detach, is_currently_executing, num_tries, last_attempt_time, last_exception, concat( 'time: ', toString(last_postpone_time), ', number: ', toString(num_postponed), ', reason: ', postpone_reason ) postpone FROM system.replication_queue ORDER BY create_time ASC LIMIT {{.Limit}}"
      }
    ],
    "replicated_fetches": [
      {
        "statement": "SELECT database, table, round(elapsed, 1) \"elapsed\", round(100 * progress, 1) \"progress\", partition_id, result_part_name, result_part_path, total_size_bytes_compressed, bytes_read_compressed, source_replica_path, source_replica_hostname, source_replica_port, interserver_scheme, to_detached, thread_id FROM system.replicated_fetches"
      }
    ],
    "tables_by_max_partition_count": [
      {
        "statement": "SELECT database, table, count() \"partitions\", sum(part_count) \"parts\", max(part_count) \"max_parts_per_partition\" FROM ( SELECT database, table, partition, count() \"part_count\" FROM system.parts WHERE active GROUP BY database, table, partition ) partitions GROUP BY database, table ORDER BY max_parts_per_partition DESC LIMIT {{.Limit}}"
      }
    ],
    "stack_traces": [
      {
        "statement": "SELECT '\\n' || arrayStringConcat( arrayMap( x, y -> concat(x, ': ', y), arrayMap(x -> addressToLine(x), trace), arrayMap(x -> demangle(addressToSymbol(x)), trace) ), '\\n' ) AS trace FROM system.stack_trace"
      }
    ],
    "crash_log": [
      {
        "statement": "SELECT event_time, signal, thread_id, query_id, '\\n' || arrayStringConcat(trace_full, '\\n') AS trace, version FROM system.crash_log ORDER BY event_time DESC"
      }
    ],
    "merges": [
      {
        "statement": "SELECT database, table, round(elapsed, 1) \"elapsed\", round(100 * progress, 1) \"progress\", is_mutation, partition_id, result_part_path, source_part_paths, num_parts, formatReadableSize(total_size_bytes_compressed) \"total_size_compressed\", formatReadableSize(bytes_read_uncompressed) \"read_uncompressed\", formatReadableSize(bytes_written_uncompressed) \"written_uncompressed\", columns_written, formatReadableSize(memory_usage) \"memory_usage\", thread_id FROM system.merges",
        "constraint": ">=20.3"
      },
      {
        "statement": "SELECT database, table, round(elapsed, 1) \"elapsed\", round(100 * progress, 1) \"progress\", is_mutation, partition_id, num_parts, formatReadableSize(total_size_bytes_compressed) \"total_size_compressed\", formatReadableSize(bytes_read_uncompressed) \"read_uncompressed\", formatReadableSize(bytes_written_uncompressed) \"written_uncompressed\", columns_written, formatReadableSize(memory_usage) \"memory_usage\" FROM system.merges"
      }
    ],
    "mutations": [
      {
        "statement": "SELECT database, table, mutation_id, command, create_time, parts_to_do_names, parts_to_do, is_done, latest_failed_part, latest_fail_time, latest_fail_reason FROM system.mutations WHERE NOT is_done ORDER BY create_time DESC",
        "constraint": ">=20.3"
      },
      {
        "statement": "SELECT database, table, mutation_id, command, create_time, parts_to_do, is_done, latest_failed_part, latest_fail_time, latest_fail_reason FROM system.mutations WHERE NOT is_done ORDER BY create_time DESC"
      }
    ],
    "recent_data_parts": [
      {
        "statement": "SELECT database, table, engine, partition_id, name, part_type, active, level, disk_name, path, marks, rows, bytes_on_disk, data_compressed_bytes, data_uncompressed_bytes, marks_bytes, modification_time, remove_time, refcount, is_frozen, min_date, max_date, min_time, max_time, min_block_number, max_block_number FROM system.parts WHERE modification_time > now() - INTERVAL 3 MINUTE ORDER BY modification_time DESC",
        "constraint": ">=20.3"
      },
      {
        "statement": "SELECT database, table, engine, partition_id, name, active, level, path, marks, rows, bytes_on_disk, data_compressed_bytes, data_uncompressed_bytes, marks_bytes, modification_time, remove_time, refcount, is_frozen, min_date, max_date, min_time, max_time, min_block_number, max_block_number FROM system.parts WHERE modification_time > now() - INTERVAL 3 MINUTE ORDER BY modification_time DESC"
      }
    ],
    "detached_parts": [
      {
"statement": "SELECT database, table, partition_id, name, disk, reason, min_block_number, max_block_number, level FROM system.detached_parts"
|
||||
}
|
||||
],
|
||||
"processes": [
|
||||
{
|
||||
"statement": "SELECT elapsed, query_id, normalizeQuery(query) AS normalized_query, is_cancelled, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, formatReadableSize(memory_usage) AS \"memory usage\", user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, thread_ids, ProfileEvents, Settings FROM system.processes ORDER BY elapsed DESC",
|
||||
"constraint": ">=21.8"
|
||||
},
|
||||
{
|
||||
"statement": "SELECT elapsed, query_id, normalizeQuery(query) AS normalized_query, is_cancelled, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, formatReadableSize(memory_usage) AS \"memory usage\", user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, thread_ids, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.processes ORDER BY elapsed DESC",
|
||||
"constraint": ">=21.3"
|
||||
},
|
||||
{
|
||||
"statement": "SELECT elapsed, query_id, normalizeQuery(query) AS normalized_query, is_cancelled, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, formatReadableSize(memory_usage) AS \"memory usage\", user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.processes ORDER BY elapsed DESC"
|
||||
}
|
||||
],
|
||||
"top_queries_by_duration": [
|
||||
{
|
||||
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents, Settings FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY query_duration_ms DESC LIMIT {{.Limit}}",
|
||||
"constraint": ">=21.8"
|
||||
},
|
||||
{
|
||||
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY query_duration_ms DESC LIMIT {{.Limit}}",
|
||||
"constraint": ">=21.3"
|
||||
},
|
||||
{
|
||||
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY query_duration_ms DESC LIMIT {{.Limit}}"
|
||||
}
|
||||
],
|
||||
"top_queries_by_memory": [
|
||||
{
|
||||
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents, Settings FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY memory_usage DESC LIMIT {{.Limit}}",
|
||||
"constraint": ">=21.8"
|
||||
},
|
||||
{
|
||||
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY memory_usage DESC LIMIT {{.Limit}}",
|
||||
"constraint": ">=21.3"
|
||||
},
|
||||
{
|
||||
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY memory_usage DESC LIMIT {{.Limit}}"
|
||||
}
|
||||
],
|
||||
"failed_queries": [
|
||||
{
|
||||
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents, Settings FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY AND exception != '' ORDER BY query_start_time DESC LIMIT {{.Limit}}",
|
||||
"constraint": ">=21.8"
|
||||
},
|
||||
{
|
||||
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY AND exception != '' ORDER BY query_start_time DESC LIMIT {{.Limit}}",
|
||||
"constraint": ">=21.3"
|
||||
},
|
||||
{
|
||||
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY AND exception != '' ORDER BY query_start_time DESC LIMIT {{.Limit}}"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
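For reference, a minimal sketch of how the {{.Limit}} placeholders above are filled in before execution, using only text/template from the standard library. The statement is a shortened illustrative example rather than one taken verbatim from the file; ParameterTemplate mirrors the struct defined in the collector below.

package main

import (
    "bytes"
    "fmt"
    "text/template"
)

type ParameterTemplate struct {
    Limit int64
}

func main() {
    statement := "SELECT name FROM system.databases ORDER BY name LIMIT {{.Limit}}"
    tmpl, err := template.New("databases").Parse(statement)
    if err != nil {
        panic(err)
    }
    buf := new(bytes.Buffer)
    // Execute substitutes {{.Limit}} with the configured row limit.
    if err := tmpl.Execute(buf, ParameterTemplate{Limit: 20}); err != nil {
        panic(err)
    }
    fmt.Println(buf.String())
    // Output: SELECT name FROM system.databases ORDER BY name LIMIT 20
}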
@ -1,159 +0,0 @@
package clickhouse

import (
    "bytes"
    _ "embed"
    "encoding/json"
    "strings"
    "text/template"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/Masterminds/semver"
    "github.com/pkg/errors"
)

// This collector collects summary statistics from the database using a set of known queries

type SummaryCollector struct {
    resourceManager *platform.ResourceManager
}

type querySet struct {
    Queries map[string][]query `json:"queries"`
}

type query struct {
    Statement  string `json:"statement"`
    Constraint string `json:"constraint"`
}

type ParameterTemplate struct {
    Limit int64
}

//go:embed queries.json
var queryFile []byte

func NewSummaryCollector(m *platform.ResourceManager) *SummaryCollector {
    return &SummaryCollector{
        resourceManager: m,
    }
}

func (sc *SummaryCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
    conf, err := conf.ValidateConfig(sc.Configuration())
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    var queries querySet
    err = json.Unmarshal(queryFile, &queries)
    if err != nil {
        return &data.DiagnosticBundle{}, errors.Wrap(err, "Unable to read queries from disk")
    }
    limit, err := config.ReadIntValue(conf, "row_limit")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }

    paramTemplate := ParameterTemplate{
        Limit: limit,
    }
    frames := make(map[string]data.Frame)

    serverVersion, err := getServerSemVersion(sc)
    if err != nil {
        return &data.DiagnosticBundle{}, errors.Wrapf(err, "Unable to read server version")
    }

    var frameErrors []error
    for queryId, sqlQueries := range queries.Queries {
        // we find the first matching query that satisfies the current version. An empty version means ANY version is
        // supported
        for _, sqlQuery := range sqlQueries {
            var queryConstraint *semver.Constraints
            if sqlQuery.Constraint != "" {
                queryConstraint, err = semver.NewConstraint(sqlQuery.Constraint)
                if err != nil {
                    // we try another one
                    frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to parse version %s for query %s", sqlQuery.Constraint, queryId))
                    continue
                }
            }
            if sqlQuery.Constraint == "" || queryConstraint.Check(serverVersion) {
                tmpl, err := template.New(queryId).Parse(sqlQuery.Statement)
                if err != nil {
                    frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to parse query %s", queryId))
                    // we try another one
                    continue
                }
                buf := new(bytes.Buffer)
                err = tmpl.Execute(buf, paramTemplate)
                if err != nil {
                    frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to process query %s template", queryId))
                    // we try another one
                    continue
                }
                frame, err := sc.resourceManager.DbClient.ExecuteStatement(queryId, buf.String())
                if err != nil {
                    frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to execute query %s", queryId))
                    // we try another one
                } else {
                    frames[queryId] = frame
                    // only 1 query is executed per query id
                    break
                }
            }
        }
    }

    fErrors := data.FrameErrors{
        Errors: frameErrors,
    }
    return &data.DiagnosticBundle{
        Frames: frames,
        Errors: fErrors,
    }, nil
}

func getServerSemVersion(sc *SummaryCollector) (*semver.Version, error) {
    serverVersion, err := sc.resourceManager.DbClient.Version()
    if err != nil {
        return &semver.Version{}, err
    }
    // drop the build number - it is not part of the semantic version
    versionComponents := strings.Split(serverVersion, ".")
    serverVersion = strings.Join(versionComponents[:len(versionComponents)-1], ".")
    return semver.NewVersion(serverVersion)
}

func (sc *SummaryCollector) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{
            config.IntParam{
                Value: 20,
                Param: config.NewParam("row_limit", "Limit rows on supported queries.", false),
            },
        },
    }
}

func (sc *SummaryCollector) IsDefault() bool {
    return true
}

func (sc *SummaryCollector) Description() string {
    return "Collects summary statistics on the database based on a set of known useful queries."
}

// here we register the collector for use
func init() {
    collectors.Register("summary", func() (collectors.Collector, error) {
        return &SummaryCollector{
            resourceManager: platform.GetResourceManager(),
        }, nil
    })
}
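As a worked example of the version handling in getServerSemVersion above: a ClickHouse version string such as "22.1.3.7" (which appears in the tests below) carries a build number that is not part of a semantic version, so the last component is dropped before parsing and constraint checking. A minimal sketch using github.com/Masterminds/semver, as in the collector:

package main

import (
    "fmt"
    "strings"

    "github.com/Masterminds/semver"
)

func main() {
    raw := "22.1.3.7"
    parts := strings.Split(raw, ".")
    trimmed := strings.Join(parts[:len(parts)-1], ".") // "22.1.3"

    version, err := semver.NewVersion(trimmed)
    if err != nil {
        panic(err)
    }
    constraint, err := semver.NewConstraint(">=21.8")
    if err != nil {
        panic(err)
    }
    // 22.1.3 satisfies >=21.8, so the >=21.8 variant of a query is chosen
    fmt.Println(constraint.Check(version)) // true
}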
@ -1,111 +0,0 @@
package clickhouse_test

import (
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
    "github.com/stretchr/testify/require"
)

func TestSummaryConfiguration(t *testing.T) {
    t.Run("correct configuration is returned for summary collector", func(t *testing.T) {
        client := test.NewFakeClickhouseClient(make(map[string][]string))
        summaryCollector := clickhouse.NewSummaryCollector(&platform.ResourceManager{
            DbClient: client,
        })
        conf := summaryCollector.Configuration()
        require.Len(t, conf.Params, 1)
        require.IsType(t, config.IntParam{}, conf.Params[0])
        limit, ok := conf.Params[0].(config.IntParam)
        require.True(t, ok)
        require.False(t, limit.Required())
        require.Equal(t, limit.Name(), "row_limit")
        require.Equal(t, int64(20), limit.Value)
    })
}

func TestSummaryCollection(t *testing.T) {

    client := test.NewFakeClickhouseClient(make(map[string][]string))
    versionFrame := test.NewFakeDataFrame("version", []string{"version()"},
        [][]interface{}{
            {"22.1.3.7"},
        },
    )
    client.QueryResponses["SELECT version()"] = &versionFrame
    databasesFrame := test.NewFakeDataFrame("databases", []string{"name", "engine", "tables", "partitions", "parts", "disk_size"},
        [][]interface{}{
            {"tutorial", "Atomic", 2, 2, 2, "1.70 GiB"},
            {"default", "Atomic", 5, 5, 6, "1.08 GiB"},
            {"system", "Atomic", 11, 24, 70, "1.05 GiB"},
            {"INFORMATION_SCHEMA", "Memory", 0, 0, 0, "0.00 B"},
            {"covid19db", "Atomic", 0, 0, 0, "0.00 B"},
            {"information_schema", "Memory", 0, 0, 0, "0.00 B"}})

    client.QueryResponses["SELECT name, engine, tables, partitions, parts, formatReadableSize(bytes_on_disk) \"disk_size\" "+
        "FROM system.databases db LEFT JOIN ( SELECT database, uniq(table) \"tables\", uniq(table, partition) \"partitions\", "+
        "count() AS parts, sum(bytes_on_disk) \"bytes_on_disk\" FROM system.parts WHERE active GROUP BY database ) AS db_stats "+
        "ON db.name = db_stats.database ORDER BY bytes_on_disk DESC LIMIT 20"] = &databasesFrame

    summaryCollector := clickhouse.NewSummaryCollector(&platform.ResourceManager{
        DbClient: client,
    })

    t.Run("test default summary collection", func(t *testing.T) {
        bundle, errs := summaryCollector.Collect(config.Configuration{})
        require.Empty(t, errs)
        require.Len(t, bundle.Errors.Errors, 30)
        require.NotNil(t, bundle)
        require.Len(t, bundle.Frames, 2)
        // check version frame
        require.Contains(t, bundle.Frames, "version")
        require.Equal(t, []string{"version()"}, bundle.Frames["version"].Columns())
        checkFrame(t, bundle.Frames["version"], versionFrame.Rows)
        // check databases frame
        require.Contains(t, bundle.Frames, "databases")
        require.Equal(t, []string{"name", "engine", "tables", "partitions", "parts", "disk_size"}, bundle.Frames["databases"].Columns())
        checkFrame(t, bundle.Frames["databases"], databasesFrame.Rows)
        client.Reset()
    })

    t.Run("test summary collection with limit", func(t *testing.T) {
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.IntParam{
                    Value: 1,
                    Param: config.NewParam("row_limit", "Limit rows on supported queries.", false),
                },
            },
        }
        bundle, errs := summaryCollector.Collect(conf)

        require.Empty(t, errs)
        require.Len(t, bundle.Errors.Errors, 31)
        require.NotNil(t, bundle)
        // databases will be absent due to the limit
        require.Len(t, bundle.Frames, 1)
        // check version frame
        require.Contains(t, bundle.Frames, "version")
        require.Equal(t, []string{"version()"}, bundle.Frames["version"].Columns())
        checkFrame(t, bundle.Frames["version"], versionFrame.Rows)

        client.QueryResponses["SELECT name, engine, tables, partitions, parts, formatReadableSize(bytes_on_disk) \"disk_size\" "+
            "FROM system.databases db LEFT JOIN ( SELECT database, uniq(table) \"tables\", uniq(table, partition) \"partitions\", "+
            "count() AS parts, sum(bytes_on_disk) \"bytes_on_disk\" FROM system.parts WHERE active GROUP BY database ) AS db_stats "+
            "ON db.name = db_stats.database ORDER BY bytes_on_disk DESC LIMIT 1"] = &databasesFrame
        bundle, errs = summaryCollector.Collect(conf)
        require.Empty(t, errs)
        require.Len(t, bundle.Errors.Errors, 30)
        require.NotNil(t, bundle)
        require.Len(t, bundle.Frames, 2)
        require.Contains(t, bundle.Frames, "version")
        // check databases frame
        require.Contains(t, bundle.Frames, "databases")
        require.Equal(t, []string{"name", "engine", "tables", "partitions", "parts", "disk_size"}, bundle.Frames["databases"].Columns())
        // this will pass as our mock client does not read the statement (specifically the limit clause) when called with execute
        checkFrame(t, bundle.Frames["databases"], databasesFrame.Rows)
    })
}
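The error counts asserted above (30 or 31) fall out of the fake-client pattern these tests use: responses are keyed by the exact statement string, so every summary query that does not match a registered response surfaces as a frame error. A minimal sketch of that pattern, with hypothetical stand-in names rather than the real test package types:

package main

import (
    "errors"
    "fmt"
)

type fakeClient struct {
    QueryResponses map[string]string // statement -> canned response
}

func (c *fakeClient) ExecuteStatement(id, statement string) (string, error) {
    if resp, ok := c.QueryResponses[statement]; ok {
        return resp, nil
    }
    // unmatched statements surface as errors, which the collector records
    return "", errors.New("no response registered for: " + statement)
}

func main() {
    client := &fakeClient{QueryResponses: map[string]string{
        "SELECT version()": "22.1.3.7",
    }}
    out, err := client.ExecuteStatement("version", "SELECT version()")
    fmt.Println(out, err)
    _, err = client.ExecuteStatement("databases", "SELECT name FROM system.databases")
    fmt.Println(err) // misses like this are why the bundle carries many frame errors
}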
@ -1,165 +0,0 @@
package clickhouse

import (
    "fmt"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
    "github.com/pkg/errors"
)

// This collector collects the tables of the system database

type SystemDatabaseCollector struct {
    resourceManager *platform.ResourceManager
}

const SystemDatabase = "system"

// ExcludeColumns maps tables to columns to exclude if we need to - this will be refined over time [table_name][columnA, columnB]
var ExcludeColumns = map[string][]string{}

// BannedTables - hardcoded list. These are always excluded even if the user doesn't specify them in exclude_tables.
// Attempts to export them will work but we will warn.
var BannedTables = []string{"numbers", "zeros"}

// OrderBy contains a map of tables to an order by clause - by default we don't order table dumps
var OrderBy = map[string]data.OrderBy{
    "errors": {
        Column: "last_error_message",
        Order:  data.Desc,
    },
    "replication_queue": {
        Column: "create_time",
        Order:  data.Asc,
    },
}

func NewSystemDatabaseCollector(m *platform.ResourceManager) *SystemDatabaseCollector {
    return &SystemDatabaseCollector{
        resourceManager: m,
    }
}

func (sc *SystemDatabaseCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
    conf, err := conf.ValidateConfig(sc.Configuration())
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    includeTables, err := config.ReadStringListValues(conf, "include_tables")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    excludeTables, err := config.ReadStringListValues(conf, "exclude_tables")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    rowLimit, err := config.ReadIntValue(conf, "row_limit")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    excludeTables = checkBannedTables(includeTables, excludeTables)
    ds, err := sc.readSystemAllTables(includeTables, excludeTables, rowLimit)
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    return ds, nil
}

// all banned tables are added to excluded if not present and not specified in included. Returns the new exclude_tables list.
func checkBannedTables(includeTables []string, excludeTables []string) []string {
    for _, bannedTable := range BannedTables {
        // if it is explicitly included we don't add it to our exclude list - explicitly included tables take precedence
        if !utils.Contains(includeTables, bannedTable) && !utils.Contains(excludeTables, bannedTable) {
            excludeTables = append(excludeTables, bannedTable)
        }
    }
    return excludeTables
}

func (sc *SystemDatabaseCollector) readSystemAllTables(include []string, exclude []string, limit int64) (*data.DiagnosticBundle, error) {
    tableNames, err := sc.resourceManager.DbClient.ReadTableNamesForDatabase(SystemDatabase)
    if err != nil {
        return nil, err
    }
    var frameErrors []error
    if include != nil {
        // nil means include everything
        tableNames = utils.Intersection(tableNames, include)
        if len(tableNames) != len(include) {
            // we warn that some included tables aren't present in the db
            frameErrors = append(frameErrors, fmt.Errorf("some tables specified in the include_tables are not in the system database and will not be exported: %v",
                utils.Distinct(include, tableNames)))
        }
    }

    // exclude tables unless they are specified in includes
    excludedTables := utils.Distinct(exclude, include)
    tableNames = utils.Distinct(tableNames, excludedTables)
    frames := make(map[string]data.Frame)

    for _, tableName := range tableNames {
        var excludeColumns []string
        if _, ok := ExcludeColumns[tableName]; ok {
            excludeColumns = ExcludeColumns[tableName]
        }
        orderBy := data.OrderBy{}
        if _, ok := OrderBy[tableName]; ok {
            orderBy = OrderBy[tableName]
        }
        frame, err := sc.resourceManager.DbClient.ReadTable(SystemDatabase, tableName, excludeColumns, orderBy, limit)
        if err != nil {
            frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to collect %s", tableName))
        } else {
            frames[tableName] = frame
        }
    }

    fErrors := data.FrameErrors{
        Errors: frameErrors,
    }
    return &data.DiagnosticBundle{
        Frames: frames,
        Errors: fErrors,
    }, nil
}

func (sc *SystemDatabaseCollector) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{
            config.StringListParam{
                // nil means include everything
                Values: nil,
                Param:  config.NewParam("include_tables", "Specify list of tables to collect. Takes precedence over exclude_tables. If not specified (default) all tables except exclude_tables.", false),
            },
            config.StringListParam{
                Values: []string{"licenses", "distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper", "aggregate_function_combinators", "collations", "contributors", "data_type_families", "formats", "graphite_retentions", "numbers", "numbers_mt", "one", "parts_columns", "projection_parts", "projection_parts_columns", "table_engines", "time_zones", "zeros", "zeros_mt"},
                Param:  config.NewParam("exclude_tables", "Specify list of tables to not collect.", false),
            },
            config.IntParam{
                Value: 100000,
                Param: config.NewParam("row_limit", "Maximum number of rows to collect from any table. Negative values mean unlimited.", false),
            },
        },
    }
}

func (sc *SystemDatabaseCollector) IsDefault() bool {
    return true
}

func (sc *SystemDatabaseCollector) Description() string {
    return "Collects all tables in the system database, except those which have been excluded."
}

// here we register the collector for use
func init() {
    collectors.Register("system_db", func() (collectors.Collector, error) {
        return &SystemDatabaseCollector{
            resourceManager: platform.GetResourceManager(),
        }, nil
    })
}
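To make the table selection rules above concrete: banned tables join the exclude list unless explicitly included, and excludes apply unless the table was explicitly included. A minimal sketch of that set logic, using hypothetical stand-ins for utils.Contains and utils.Distinct rather than the real utils package:

package main

import "fmt"

func contains(list []string, s string) bool {
    for _, v := range list {
        if v == s {
            return true
        }
    }
    return false
}

// distinct returns the elements of a that are not in b.
func distinct(a, b []string) []string {
    var out []string
    for _, v := range a {
        if !contains(b, v) {
            out = append(out, v)
        }
    }
    return out
}

func main() {
    banned := []string{"numbers", "zeros"}
    include := []string{"numbers"} // an explicit include wins over the ban
    exclude := []string{"disks"}

    // banned tables are appended to the exclude list unless explicitly included
    for _, b := range banned {
        if !contains(include, b) && !contains(exclude, b) {
            exclude = append(exclude, b)
        }
    }
    tables := []string{"numbers", "zeros", "disks", "clusters"}
    // excludes apply unless the table was explicitly included
    effectiveExcludes := distinct(exclude, include)
    fmt.Println(distinct(tables, effectiveExcludes)) // [numbers clusters]
}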
@ -1,366 +0,0 @@
package clickhouse_test

import (
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
    "github.com/stretchr/testify/require"
)

func TestSystemConfiguration(t *testing.T) {
    t.Run("correct configuration is returned for system db collector", func(t *testing.T) {
        client := test.NewFakeClickhouseClient(make(map[string][]string))
        systemDbCollector := clickhouse.NewSystemDatabaseCollector(&platform.ResourceManager{
            DbClient: client,
        })
        conf := systemDbCollector.Configuration()
        require.Len(t, conf.Params, 3)
        // check first param
        require.IsType(t, config.StringListParam{}, conf.Params[0])
        includeTables, ok := conf.Params[0].(config.StringListParam)
        require.True(t, ok)
        require.False(t, includeTables.Required())
        require.Equal(t, includeTables.Name(), "include_tables")
        require.Nil(t, includeTables.Values)
        // check second param
        require.IsType(t, config.StringListParam{}, conf.Params[1])
        excludeTables, ok := conf.Params[1].(config.StringListParam)
        require.True(t, ok)
        require.False(t, excludeTables.Required())
        require.Equal(t, "exclude_tables", excludeTables.Name())
        require.Equal(t, []string{"licenses", "distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper", "aggregate_function_combinators", "collations", "contributors", "data_type_families", "formats", "graphite_retentions", "numbers", "numbers_mt", "one", "parts_columns", "projection_parts", "projection_parts_columns", "table_engines", "time_zones", "zeros", "zeros_mt"}, excludeTables.Values)
        // check third param
        require.IsType(t, config.IntParam{}, conf.Params[2])
        rowLimit, ok := conf.Params[2].(config.IntParam)
        require.True(t, ok)
        require.False(t, rowLimit.Required())
        require.Equal(t, "row_limit", rowLimit.Name())
        require.Equal(t, int64(100000), rowLimit.Value)
    })
}

func TestSystemDbCollect(t *testing.T) {

    diskFrame := test.NewFakeDataFrame("disks", []string{"name", "path", "free_space", "total_space", "keep_free_space", "type"},
        [][]interface{}{
            {"default", "/var/lib/clickhouse", 1729659346944, 1938213220352, "", "local"},
        },
    )
    clusterFrame := test.NewFakeDataFrame("clusters", []string{"cluster", "shard_num", "shard_weight", "replica_num", "host_name", "host_address", "port", "is_local", "user", "default_database", "errors_count", "slowdowns_count", "estimated_recovery_time"},
        [][]interface{}{
            {"events", 1, 1, 1, "dalem-local-clickhouse-blue-1", "192.168.144.2", 9000, 1, "default", "", 0, 0, 0},
            {"events", 2, 1, 1, "dalem-local-clickhouse-blue-2", "192.168.144.4", 9000, 1, "default", "", 0, 0, 0},
            {"events", 3, 1, 1, "dalem-local-clickhouse-blue-3", "192.168.144.3", 9000, 1, "default", "", 0, 0, 0},
        },
    )
    userFrame := test.NewFakeDataFrame("users", []string{"name", "id", "storage", "auth_type", "auth_params", "host_ip", "host_names", "host_names_regexp", "host_names_like"},
        [][]interface{}{
            {"default", "94309d50-4f52-5250-31bd-74fecac179db,users.xml,plaintext_password", "sha256_password", []string{"::0"}, []string{}, []string{}, []string{}},
        },
    )

    dbTables := map[string][]string{
        clickhouse.SystemDatabase: {"disks", "clusters", "users"},
    }
    client := test.NewFakeClickhouseClient(dbTables)

    client.QueryResponses["SELECT * FROM system.disks LIMIT 100000"] = &diskFrame
    client.QueryResponses["SELECT * FROM system.clusters LIMIT 100000"] = &clusterFrame
    client.QueryResponses["SELECT * FROM system.users LIMIT 100000"] = &userFrame
    systemDbCollector := clickhouse.NewSystemDatabaseCollector(&platform.ResourceManager{
        DbClient: client,
    })

    t.Run("test default system db collection", func(t *testing.T) {
        diagSet, err := systemDbCollector.Collect(config.Configuration{})
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 3)
        // disks frame
        require.Equal(t, "disks", diagSet.Frames["disks"].Name())
        require.Equal(t, diskFrame.ColumnNames, diagSet.Frames["disks"].Columns())
        checkFrame(t, diagSet.Frames["disks"], diskFrame.Rows)
        // clusters frame
        require.Equal(t, "clusters", diagSet.Frames["clusters"].Name())
        require.Equal(t, clusterFrame.ColumnNames, diagSet.Frames["clusters"].Columns())
        checkFrame(t, diagSet.Frames["clusters"], clusterFrame.Rows)
        // users frame
        require.Equal(t, "users", diagSet.Frames["users"].Name())
        require.Equal(t, userFrame.ColumnNames, diagSet.Frames["users"].Columns())
        checkFrame(t, diagSet.Frames["users"], userFrame.Rows)
        client.Reset()
    })

    t.Run("test when we pass an includes", func(t *testing.T) {
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.StringListParam{
                    Values: []string{"disks"},
                    Param:  config.NewParam("include_tables", "Inclusion", false),
                },
            },
        }
        diagSet, err := systemDbCollector.Collect(conf)
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 1)
        // disks frame
        require.Equal(t, "disks", diagSet.Frames["disks"].Name())
        require.Equal(t, diskFrame.ColumnNames, diagSet.Frames["disks"].Columns())
        checkFrame(t, diagSet.Frames["disks"], diskFrame.Rows)
        client.Reset()
    })

    // test excludes
    t.Run("test when we pass an excludes", func(t *testing.T) {
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.StringListParam{
                    Values: []string{"disks"},
                    Param:  config.NewParam("exclude_tables", "Exclusion", false),
                },
            },
        }
        diagSet, err := systemDbCollector.Collect(conf)
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 2)
        // clusters frame
        require.Equal(t, "clusters", diagSet.Frames["clusters"].Name())
        require.Equal(t, clusterFrame.ColumnNames, diagSet.Frames["clusters"].Columns())
        checkFrame(t, diagSet.Frames["clusters"], clusterFrame.Rows)
        // users frame
        require.Equal(t, "users", diagSet.Frames["users"].Name())
        require.Equal(t, userFrame.ColumnNames, diagSet.Frames["users"].Columns())
        checkFrame(t, diagSet.Frames["users"], userFrame.Rows)
        client.Reset()
    })

    // test an include which isn't in the list
    t.Run("test when we pass an invalid includes", func(t *testing.T) {
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.StringListParam{
                    Values: []string{"disks", "invalid"},
                    Param:  config.NewParam("include_tables", "Inclusion", false),
                },
            },
        }
        diagSet, err := systemDbCollector.Collect(conf)
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 1)
        require.Equal(t, diagSet.Errors.Error(), "some tables specified in the include_tables are not in the "+
            "system database and will not be exported: [invalid]")
        require.Len(t, diagSet.Frames, 1)
        // disks frame
        require.Equal(t, "disks", diagSet.Frames["disks"].Name())
        require.Equal(t, diskFrame.ColumnNames, diagSet.Frames["disks"].Columns())
        checkFrame(t, diagSet.Frames["disks"], diskFrame.Rows)
        client.Reset()
    })

    t.Run("test when we use a table with excluded fields", func(t *testing.T) {
        excludeDefault := clickhouse.ExcludeColumns
        client.QueryResponses["SELECT * EXCEPT(keep_free_space,type) FROM system.disks LIMIT 100000"] = &diskFrame
        clickhouse.ExcludeColumns = map[string][]string{
            "disks": {"keep_free_space", "type"},
        }
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.StringListParam{
                    Values: []string{"disks"},
                    Param:  config.NewParam("include_tables", "Inclusion", false),
                },
            },
        }
        diagSet, err := systemDbCollector.Collect(conf)
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 1)
        // disks frame
        require.Equal(t, "disks", diagSet.Frames["disks"].Name())
        require.Equal(t, []string{"name", "path", "free_space", "total_space"}, diagSet.Frames["disks"].Columns())
        eDiskFrame := test.NewFakeDataFrame("disks", []string{"name", "path", "free_space", "total_space"},
            [][]interface{}{
                {"default", "/var/lib/clickhouse", 1729659346944, 1938213220352},
            },
        )
        checkFrame(t, diagSet.Frames["disks"], eDiskFrame.Rows)
        clickhouse.ExcludeColumns = excludeDefault
        client.Reset()
    })

    t.Run("test with a low row limit", func(t *testing.T) {
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.IntParam{
                    Value: 1,
                    Param: config.NewParam("row_limit", "Maximum number of rows to collect from any table. Negative values mean unlimited.", false),
                },
            },
        }
        client.QueryResponses["SELECT * FROM system.disks LIMIT 1"] = &diskFrame
        client.QueryResponses["SELECT * FROM system.clusters LIMIT 1"] = &clusterFrame
        client.QueryResponses["SELECT * FROM system.users LIMIT 1"] = &userFrame
        diagSet, err := systemDbCollector.Collect(conf)
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 3)
        // clusters frame
        require.Equal(t, "clusters", diagSet.Frames["clusters"].Name())
        require.Equal(t, clusterFrame.ColumnNames, diagSet.Frames["clusters"].Columns())
        lClusterFrame := test.NewFakeDataFrame("clusters", []string{"cluster", "shard_num", "shard_weight", "replica_num", "host_name", "host_address", "port", "is_local", "user", "default_database", "errors_count", "slowdowns_count", "estimated_recovery_time"},
            [][]interface{}{
                {"events", 1, 1, 1, "dalem-local-clickhouse-blue-1", "192.168.144.2", 9000, 1, "default", "", 0, 0, 0},
            })
        checkFrame(t, diagSet.Frames["clusters"], lClusterFrame.Rows)
        client.Reset()
    })

    t.Run("test with a negative low row limit", func(t *testing.T) {
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.IntParam{
                    Value: -23,
                    Param: config.NewParam("row_limit", "Maximum number of rows to collect from any table. Negative values mean unlimited.", false),
                },
            },
        }
        client.QueryResponses["SELECT * FROM system.clusters"] = &clusterFrame
        client.QueryResponses["SELECT * FROM system.disks"] = &diskFrame
        client.QueryResponses["SELECT * FROM system.users"] = &userFrame
        diagSet, err := systemDbCollector.Collect(conf)
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 3)
        // disks frame
        require.Equal(t, "disks", diagSet.Frames["disks"].Name())
        require.Equal(t, diskFrame.ColumnNames, diagSet.Frames["disks"].Columns())
        checkFrame(t, diagSet.Frames["disks"], diskFrame.Rows)
        // clusters frame
        require.Equal(t, "clusters", diagSet.Frames["clusters"].Name())
        require.Equal(t, clusterFrame.ColumnNames, diagSet.Frames["clusters"].Columns())
        checkFrame(t, diagSet.Frames["clusters"], clusterFrame.Rows)
        // users frame
        require.Equal(t, "users", diagSet.Frames["users"].Name())
        require.Equal(t, userFrame.ColumnNames, diagSet.Frames["users"].Columns())
        checkFrame(t, diagSet.Frames["users"], userFrame.Rows)
        client.Reset()
    })

    t.Run("test that includes overrides excludes", func(t *testing.T) {
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.StringListParam{
                    Values: []string{"disks"},
                    Param:  config.NewParam("exclude_tables", "Excluded", false),
                },
                config.StringListParam{
                    Values: []string{"disks", "clusters", "users"},
                    Param:  config.NewParam("include_tables", "Included", false),
                },
            },
        }
        diagSet, err := systemDbCollector.Collect(conf)
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 3)
        client.Reset()
    })

    t.Run("test banned", func(t *testing.T) {
        bannedDefault := clickhouse.BannedTables
        clickhouse.BannedTables = []string{"disks"}
        diagSet, err := systemDbCollector.Collect(config.Configuration{})
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 2)
        require.Contains(t, diagSet.Frames, "users")
        require.Contains(t, diagSet.Frames, "clusters")
        clickhouse.BannedTables = bannedDefault
        client.Reset()
    })

    t.Run("test banned unless included", func(t *testing.T) {
        bannedDefault := clickhouse.BannedTables
        clickhouse.BannedTables = []string{"disks"}
        conf := config.Configuration{
            Params: []config.ConfigParam{
                config.StringListParam{
                    Values: []string{"disks", "clusters", "users"},
                    Param:  config.NewParam("include_tables", "Included", false),
                },
            },
        }
        diagSet, err := systemDbCollector.Collect(conf)
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 3)
        require.Contains(t, diagSet.Frames, "disks")
        require.Contains(t, diagSet.Frames, "users")
        require.Contains(t, diagSet.Frames, "clusters")
        clickhouse.BannedTables = bannedDefault
        client.Reset()
    })

    t.Run("tables are ordered if configured", func(t *testing.T) {
        defaultOrderBy := clickhouse.OrderBy
        clickhouse.OrderBy = map[string]data.OrderBy{
            "clusters": {
                Column: "shard_num",
                Order:  data.Desc,
            },
        }
        client.QueryResponses["SELECT * FROM system.clusters ORDER BY shard_num DESC LIMIT 100000"] = &clusterFrame
        diagSet, err := systemDbCollector.Collect(config.Configuration{})
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 3)
        clickhouse.OrderBy = defaultOrderBy
        oClusterFrame := test.NewFakeDataFrame("clusters", []string{"cluster", "shard_num", "shard_weight", "replica_num", "host_name", "host_address", "port", "is_local", "user", "default_database", "errors_count", "slowdowns_count", "estimated_recovery_time"},
            [][]interface{}{
                {"events", 3, 1, 1, "dalem-local-clickhouse-blue-3", "192.168.144.3", 9000, 1, "default", "", 0, 0, 0},
                {"events", 2, 1, 1, "dalem-local-clickhouse-blue-2", "192.168.144.4", 9000, 1, "default", "", 0, 0, 0},
                {"events", 1, 1, 1, "dalem-local-clickhouse-blue-1", "192.168.144.2", 9000, 1, "default", "", 0, 0, 0},
            },
        )
        checkFrame(t, diagSet.Frames["clusters"], oClusterFrame.Rows)
        client.Reset()
    })

}

func checkFrame(t *testing.T, frame data.Frame, rows [][]interface{}) {
    i := 0
    for {
        values, ok, err := frame.Next()
        require.Nil(t, err)
        if !ok {
            break
        }
        require.ElementsMatch(t, rows[i], values)
        i += 1
    }
    require.Equal(t, i, len(rows))
}
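checkFrame above relies on the iteration contract used throughout these collectors: Next returns the next row's values, a flag signalling whether a row was produced, and an error, with ok=false marking exhaustion. A minimal in-memory implementation of that contract, as a sketch only - the real data.Frame interface may carry additional methods:

package main

import "fmt"

type sliceFrame struct {
    rows [][]interface{}
    pos  int
}

// Next yields (values, ok, err); ok=false signals the frame is exhausted.
func (f *sliceFrame) Next() ([]interface{}, bool, error) {
    if f.pos >= len(f.rows) {
        return nil, false, nil // exhausted, no error
    }
    row := f.rows[f.pos]
    f.pos++
    return row, true, nil
}

func main() {
    frame := &sliceFrame{rows: [][]interface{}{{"default", "local"}, {"backup", "s3"}}}
    for {
        values, ok, err := frame.Next()
        if err != nil || !ok {
            break
        }
        fmt.Println(values...)
    }
}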
@ -1,153 +0,0 @@
|
||||
package clickhouse
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
|
||||
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
|
||||
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
|
||||
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
|
||||
"github.com/bmatcuk/doublestar/v4"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/rs/zerolog/log"
|
||||
)
|
||||
|
||||
// This collector collects the system zookeeper db
|
||||
|
||||
type ZookeeperCollector struct {
|
||||
resourceManager *platform.ResourceManager
|
||||
}
|
||||
|
||||
func NewZookeeperCollector(m *platform.ResourceManager) *ZookeeperCollector {
|
||||
return &ZookeeperCollector{
|
||||
resourceManager: m,
|
||||
}
|
||||
}
|
||||
|
||||
func (zkc *ZookeeperCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
|
||||
conf, err := conf.ValidateConfig(zkc.Configuration())
|
||||
if err != nil {
|
||||
return &data.DiagnosticBundle{}, err
|
||||
}
|
||||
|
||||
pathPattern, err := config.ReadStringValue(conf, "path_pattern")
|
||||
if err != nil {
|
||||
return &data.DiagnosticBundle{}, err
|
||||
}
|
||||
defaultPattern, _ := zkc.Configuration().GetConfigParam("path_pattern")
|
||||
if defaultPattern.(config.StringParam).Value != pathPattern {
|
||||
log.Warn().Msgf("Using non default zookeeper glob pattern [%s] - this can potentially cause high query load", pathPattern)
|
||||
}
|
||||
maxDepth, err := config.ReadIntValue(conf, "max_depth")
|
||||
if err != nil {
|
||||
return &data.DiagnosticBundle{}, err
|
||||
}
|
||||
rowLimit, err := config.ReadIntValue(conf, "row_limit")
|
||||
if err != nil {
|
||||
return &data.DiagnosticBundle{}, err
|
||||
}
|
||||
// we use doublestar for globs as it provides us with ** but also allows us to identify prefix or base paths
|
||||
if !doublestar.ValidatePattern(pathPattern) {
|
||||
return &data.DiagnosticBundle{}, errors.Wrapf(err, "%s is not a valid pattern", pathPattern)
|
||||
}
|
||||
base, _ := doublestar.SplitPattern(pathPattern)
|
||||
frames := make(map[string]data.Frame)
|
||||
hFrame, frameErrors := zkc.collectSubFrames(base, pathPattern, rowLimit, 0, maxDepth)
|
||||
    fErrors := data.FrameErrors{
        Errors: frameErrors,
    }
    frames["zookeeper_db"] = hFrame
    return &data.DiagnosticBundle{
        Frames: frames,
        Errors: fErrors,
    }, nil
}

// recursively iterates over the zookeeper sub tables to a max depth, applying the filter and max rows per table
func (zkc *ZookeeperCollector) collectSubFrames(path, pathPattern string, rowLimit, currentDepth, maxDepth int64) (data.HierarchicalFrame, []error) {
    var frameErrors []error
    var subFrames []data.HierarchicalFrame

    currentDepth++
    if currentDepth == maxDepth {
        return data.HierarchicalFrame{}, frameErrors
    }
    match, err := doublestar.PathMatch(pathPattern, path)
    if err != nil {
        frameErrors = append(frameErrors, errors.Wrapf(err, "Path match failed for pattern %s with path %s", pathPattern, path))
        return data.HierarchicalFrame{}, frameErrors
    }
    // we allow a single level to be examined or we never get going
    if !match && currentDepth > 1 {
        return data.HierarchicalFrame{}, frameErrors
    }
    frame, err := zkc.resourceManager.DbClient.ExecuteStatement(path, fmt.Sprintf("SELECT name FROM system.zookeeper WHERE path='%s' LIMIT %d", path, rowLimit))
    if err != nil {
        frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to read zookeeper table path for sub paths %s", path))
        return data.HierarchicalFrame{}, frameErrors
    }

    // this isn't ideal - we re-execute the query and add the result to our collection, as it will be consumed lazily by the output
    outputFrame, err := zkc.resourceManager.DbClient.ExecuteStatement(path, fmt.Sprintf("SELECT * FROM system.zookeeper WHERE path='%s' LIMIT %d", path, rowLimit))
    if err != nil {
        frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to read zookeeper table path %s", path))
        return data.HierarchicalFrame{}, frameErrors
    }
    frameComponents := strings.Split(path, "/")
    frameId := frameComponents[len(frameComponents)-1]

    for {
        values, ok, err := frame.Next()
        if err != nil {
            frameErrors = append(frameErrors, errors.Wrapf(err, "unable to read frame %s", frame.Name()))
            return data.NewHierarchicalFrame(frameId, outputFrame, subFrames), frameErrors
        }
        if !ok {
            return data.NewHierarchicalFrame(frameId, outputFrame, subFrames), frameErrors
        }
        subName := fmt.Sprintf("%v", values[0])
        subPath := fmt.Sprintf("%s/%s", path, subName)
        subFrame, errs := zkc.collectSubFrames(subPath, pathPattern, rowLimit, currentDepth, maxDepth)
        if subFrame.Name() != "" {
            subFrames = append(subFrames, subFrame)
        }
        frameErrors = append(frameErrors, errs...)
    }
}

func (zkc *ZookeeperCollector) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{
            config.StringParam{
                Value: "/clickhouse/{task_queue}/**",
                Param: config.NewParam("path_pattern", "Glob pattern for zookeeper path matching. Change with caution.", false),
            },
            config.IntParam{
                Value: 8,
                Param: config.NewParam("max_depth", "Max depth for zookeeper navigation.", false),
            },
            config.IntParam{
                Value: 10,
                Param: config.NewParam("row_limit", "Maximum number of rows/sub nodes to collect/expand from any zookeeper leaf. Negative values mean unlimited.", false),
            },
        },
    }
}

func (zkc *ZookeeperCollector) IsDefault() bool {
    return false
}

func (zkc *ZookeeperCollector) Description() string {
    return "Collects Zookeeper information available within ClickHouse."
}

// here we register the collector for use
func init() {
    collectors.Register("zookeeper_db", func() (collectors.Collector, error) {
        return &ZookeeperCollector{
            resourceManager: platform.GetResourceManager(),
        }, nil
    })
}
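For context on the traversal above: doublestar treats ** as matching any number of path segments, which is why the default pattern both gates and bounds the recursion, with the "currentDepth > 1" exemption letting the root node through. A minimal, self-contained sketch of how the default pattern behaves - the module path and example paths are illustrative assumptions, not taken from this repository:

package main

import (
    "fmt"

    "github.com/bmatcuk/doublestar/v4" // assumed module path; the collector vendors some version of doublestar
)

func main() {
    pattern := "/clickhouse/{task_queue}/**"
    for _, p := range []string{"/clickhouse", "/clickhouse/task_queue", "/clickhouse/task_queue/ddl"} {
        match, err := doublestar.PathMatch(pattern, p)
        if err != nil {
            panic(err)
        }
        // nodes under /clickhouse/task_queue match the pattern directly;
        // the bare root relies on the single-level exemption in collectSubFrames
        fmt.Printf("%-30s %v\n", p, match)
    }
}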
@ -1,102 +0,0 @@
package clickhouse_test

import (
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
    "github.com/stretchr/testify/require"
)

func TestZookeeperConfiguration(t *testing.T) {
    t.Run("correct configuration is returned for system zookeeper collector", func(t *testing.T) {
        client := test.NewFakeClickhouseClient(make(map[string][]string))
        zkCollector := clickhouse.NewZookeeperCollector(&platform.ResourceManager{
            DbClient: client,
        })
        conf := zkCollector.Configuration()
        require.Len(t, conf.Params, 3)
        // check first param
        require.IsType(t, config.StringParam{}, conf.Params[0])
        pathPattern, ok := conf.Params[0].(config.StringParam)
        require.True(t, ok)
        require.False(t, pathPattern.Required())
        require.Equal(t, pathPattern.Name(), "path_pattern")
        require.Equal(t, "/clickhouse/{task_queue}/**", pathPattern.Value)
        // check second param
        require.IsType(t, config.IntParam{}, conf.Params[1])
        maxDepth, ok := conf.Params[1].(config.IntParam)
        require.True(t, ok)
        require.False(t, maxDepth.Required())
        require.Equal(t, "max_depth", maxDepth.Name())
        require.Equal(t, int64(8), maxDepth.Value)
        // check third param
        require.IsType(t, config.IntParam{}, conf.Params[2])
        rowLimit, ok := conf.Params[2].(config.IntParam)
        require.True(t, ok)
        require.False(t, rowLimit.Required())
        require.Equal(t, "row_limit", rowLimit.Name())
        require.Equal(t, int64(10), rowLimit.Value)
    })
}

func TestZookeeperCollect(t *testing.T) {
    level1 := test.NewFakeDataFrame("level_1", []string{"name", "value", "czxid", "mzxid", "ctime", "mtime", "version", "cversion", "aversion", "ephemeralOwner", "dataLength", "numChildren", "pzxid", "path"},
        [][]interface{}{
            {"name", "value", "czxid", "mzxid", "ctime", "mtime", "version", "cversion", "aversion", "ephemeralOwner", "dataLength", "numChildren", "pzxid", "path"},
            {"task_queue", "", "4", "4", "2022-02-22 13:30:15", "2022-02-22 13:30:15", "0", "1", "0", "0", "0", "1", "5", "/clickhouse"},
            {"copytasks", "", "525608", "525608", "2022-03-09 13:47:39", "2022-03-09 13:47:39", "0", "7", "0", "0", "0", "7", "526100", "/clickhouse"},
        },
    )
    level2 := test.NewFakeDataFrame("level_2", []string{"name", "value", "czxid", "mzxid", "ctime", "mtime", "version", "cversion", "aversion", "ephemeralOwner", "dataLength", "numChildren", "pzxid", "path"},
        [][]interface{}{
            {"ddl", "", "5", "5", "2022-02-22 13:30:15", "2022-02-22 13:30:15", "0", "0", "0", "0", "0", "0", "5", "/clickhouse/task_queue"},
        },
    )
    level3 := test.NewFakeDataFrame("level_3", []string{"name", "value", "czxid", "mzxid", "ctime", "mtime", "version", "cversion", "aversion", "ephemeralOwner", "dataLength", "numChildren", "pzxid", "path"},
        [][]interface{}{},
    )
    dbTables := map[string][]string{
        clickhouse.SystemDatabase: {"zookeeper"},
    }
    client := test.NewFakeClickhouseClient(dbTables)

    client.QueryResponses["SELECT name FROM system.zookeeper WHERE path='/clickhouse' LIMIT 10"] = &level1
    // can't reuse the frame as the first frame will be iterated as part of the recursive zookeeper search performed by the collector
    cLevel1 := test.NewFakeDataFrame("level_1", level1.Columns(), level1.Rows)
    client.QueryResponses["SELECT * FROM system.zookeeper WHERE path='/clickhouse' LIMIT 10"] = &cLevel1
    client.QueryResponses["SELECT name FROM system.zookeeper WHERE path='/clickhouse/task_queue' LIMIT 10"] = &level2
    cLevel2 := test.NewFakeDataFrame("level_2", level2.Columns(), level2.Rows)
    client.QueryResponses["SELECT * FROM system.zookeeper WHERE path='/clickhouse/task_queue' LIMIT 10"] = &cLevel2
    client.QueryResponses["SELECT name FROM system.zookeeper WHERE path='/clickhouse/task_queue/ddl' LIMIT 10"] = &level3
    cLevel3 := test.NewFakeDataFrame("level_3", level3.Columns(), level3.Rows)
    client.QueryResponses["SELECT * FROM system.zookeeper WHERE path='/clickhouse/task_queue/ddl' LIMIT 10"] = &cLevel3

    zKCollector := clickhouse.NewZookeeperCollector(&platform.ResourceManager{
        DbClient: client,
    })

    t.Run("test default zookeeper collection", func(t *testing.T) {
        diagSet, err := zKCollector.Collect(config.Configuration{})
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 1)
        require.Contains(t, diagSet.Frames, "zookeeper_db")
        require.Equal(t, "clickhouse", diagSet.Frames["zookeeper_db"].Name())
        require.IsType(t, data.HierarchicalFrame{}, diagSet.Frames["zookeeper_db"])
        checkFrame(t, diagSet.Frames["zookeeper_db"], level1.Rows)
        require.Equal(t, level1.Columns(), diagSet.Frames["zookeeper_db"].Columns())
        hierarchicalFrame := diagSet.Frames["zookeeper_db"].(data.HierarchicalFrame)
        require.Len(t, hierarchicalFrame.SubFrames, 1)
        checkFrame(t, hierarchicalFrame.SubFrames[0], cLevel2.Rows)
        require.Equal(t, cLevel2.Columns(), hierarchicalFrame.SubFrames[0].Columns())
        hierarchicalFrame = hierarchicalFrame.SubFrames[0]
        require.Len(t, hierarchicalFrame.SubFrames, 1)
        checkFrame(t, hierarchicalFrame.SubFrames[0], cLevel3.Rows)
        require.Equal(t, cLevel3.Columns(), hierarchicalFrame.SubFrames[0].Columns())
    })
}
@ -1,75 +0,0 @@
package collectors

import (
    "fmt"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/pkg/errors"
    "github.com/rs/zerolog/log"
)

type Collector interface {
    Collect(config config.Configuration) (*data.DiagnosticBundle, error)
    Configuration() config.Configuration
    IsDefault() bool
    Description() string
}

// Register can be called from init() on a collector in this package
// It will automatically be added to the Collectors map to be called externally
func Register(name string, collector CollectorFactory) {
    if name == "diag_trace" {
        // we use this to record errors and warnings
        log.Fatal().Msgf("diag_trace is a reserved collector name")
    }
    // names must be unique
    if _, ok := Collectors[name]; ok {
        log.Fatal().Msgf("More than 1 collector is trying to register under the name %s. Names must be unique.", name)
    }
    Collectors[name] = collector
}

// CollectorFactory lets us use a closure to get instances of the collector struct
type CollectorFactory func() (Collector, error)

var Collectors = map[string]CollectorFactory{}

func GetCollectorNames(defaultOnly bool) []string {
    // can't pre-allocate as not all may be default
    var collectors []string
    for collectorName := range Collectors {
        collector, err := GetCollectorByName(collectorName)
        if err != nil {
            // note: zerolog events are only emitted (and Fatal only exits) once Msg/Msgf is called
            log.Fatal().Err(err).Msgf("Unable to initialize collector %s", collectorName)
        }
        if !defaultOnly || collector.IsDefault() {
            collectors = append(collectors, collectorName)
        }
    }
    return collectors
}

func GetCollectorByName(name string) (Collector, error) {
    if collectorFactory, ok := Collectors[name]; ok {
        collector, err := collectorFactory()
        if err != nil {
            return nil, errors.Wrapf(err, "collector %s could not be initialized", name)
        }
        return collector, nil
    }
    return nil, fmt.Errorf("%s is not a valid collector name", name)
}

func BuildConfigurationOptions() (map[string]config.Configuration, error) {
    configurations := make(map[string]config.Configuration)
    for name, collectorFactory := range Collectors {
        collector, err := collectorFactory()
        if err != nil {
            return nil, errors.Wrapf(err, "collector %s could not be initialized", name)
        }
        configurations[name] = collector.Configuration()
    }
    return configurations, nil
}
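The registry above is driven entirely by init()-time side effects: importing a collector package is enough to make it available by name. A hypothetical collector registering itself might look like the sketch below - the NoopCollector name and its package are invented for illustration, but the interface and Register call match the registry above:

package example

import (
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
)

// NoopCollector is a hypothetical collector used purely to illustrate registration
type NoopCollector struct{}

func (c *NoopCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
    return &data.DiagnosticBundle{}, nil
}
func (c *NoopCollector) Configuration() config.Configuration { return config.Configuration{} }
func (c *NoopCollector) IsDefault() bool                     { return false }
func (c *NoopCollector) Description() string                 { return "Does nothing" }

func init() {
    // after this, collectors.GetCollectorByName("noop") returns a fresh instance
    collectors.Register("noop", func() (collectors.Collector, error) {
        return &NoopCollector{}, nil
    })
}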
@ -1,57 +0,0 @@
package collectors_test

import (
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
    _ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/stretchr/testify/require"
)

func TestGetCollectorNames(t *testing.T) {
    t.Run("can get all collector names", func(t *testing.T) {
        collectorNames := collectors.GetCollectorNames(false)
        require.ElementsMatch(t, []string{"system_db", "config", "summary", "system", "logs", "db_logs", "file", "command", "zookeeper_db"}, collectorNames)
    })

    t.Run("can get default collector names", func(t *testing.T) {
        collectorNames := collectors.GetCollectorNames(true)
        require.ElementsMatch(t, []string{"system_db", "config", "summary", "system", "logs", "db_logs"}, collectorNames)
    })
}

func TestGetCollectorByName(t *testing.T) {

    t.Run("can get collector by name", func(t *testing.T) {
        collector, err := collectors.GetCollectorByName("system_db")
        require.Nil(t, err)
        require.Equal(t, clickhouse.NewSystemDatabaseCollector(platform.GetResourceManager()), collector)
    })

    t.Run("fails on non existing collector", func(t *testing.T) {
        collector, err := collectors.GetCollectorByName("random")
        require.NotNil(t, err)
        require.Equal(t, "random is not a valid collector name", err.Error())
        require.Nil(t, collector)
    })
}

func TestBuildConfigurationOptions(t *testing.T) {

    t.Run("can get all collector configurations", func(t *testing.T) {
        configs, err := collectors.BuildConfigurationOptions()
        require.Nil(t, err)
        require.Len(t, configs, 9)
        require.Contains(t, configs, "system_db")
        require.Contains(t, configs, "config")
        require.Contains(t, configs, "summary")
        require.Contains(t, configs, "system")
        require.Contains(t, configs, "logs")
        require.Contains(t, configs, "db_logs")
        require.Contains(t, configs, "file")
        require.Contains(t, configs, "command")
        require.Contains(t, configs, "zookeeper_db")
    })
}
@ -1,90 +0,0 @@
package system

import (
    "bytes"
    "os/exec"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/google/shlex"
    "github.com/pkg/errors"
)

// This collector runs a user specified command and collects it to a file

type CommandCollector struct {
    resourceManager *platform.ResourceManager
}

func NewCommandCollector(m *platform.ResourceManager) *CommandCollector {
    return &CommandCollector{
        resourceManager: m,
    }
}

func (c *CommandCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
    conf, err := conf.ValidateConfig(c.Configuration())
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    command, err := config.ReadStringValue(conf, "command")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    var frameErrors []error
    // use shlex to split the command and its arguments
    cmdArgs, err := shlex.Split(command)
    if err != nil {
        return &data.DiagnosticBundle{}, errors.Wrap(err, "Unable to parse command")
    }
    // errors.Wrap returns nil for a nil error, so an empty argument list needs its own error
    if len(cmdArgs) == 0 {
        return &data.DiagnosticBundle{}, errors.New("Unable to parse command: no arguments found")
    }
    cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
    var stdout, stderr bytes.Buffer
    cmd.Stdout = &stdout
    cmd.Stderr = &stderr
    err = cmd.Run()
    var sError string
    if err != nil {
        frameErrors = append(frameErrors, errors.Wrap(err, "Unable to execute command"))
        sError = err.Error()
    }
    memoryFrame := data.NewMemoryFrame("output", []string{"command", "stdout", "stderr", "error"}, [][]interface{}{
        {command, stdout.String(), stderr.String(), sError},
    })
    return &data.DiagnosticBundle{
        Errors: data.FrameErrors{Errors: frameErrors},
        Frames: map[string]data.Frame{
            "output": memoryFrame,
        },
    }, nil
}

func (c *CommandCollector) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{
            config.StringParam{
                Value:      "",
                Param:      config.NewParam("command", "Command to execute", true),
                AllowEmpty: false,
            },
        },
    }
}

func (c *CommandCollector) IsDefault() bool {
    return false
}

func (c *CommandCollector) Description() string {
    return "Allows collection of the output from a user specified command"
}

// here we register the collector for use
func init() {
    collectors.Register("command", func() (collectors.Collector, error) {
        return &CommandCollector{
            resourceManager: platform.GetResourceManager(),
        }, nil
    })
}
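Because the command is tokenized with shlex rather than handed to a shell, quoting behaves like a POSIX shell but pipes and redirection are not interpreted. A small sketch of the tokenization - the example command itself is an illustrative assumption:

package main

import (
    "fmt"

    "github.com/google/shlex"
)

func main() {
    // quoted arguments survive as single tokens; no shell is involved
    args, err := shlex.Split(`grep -r "max concurrent queries" /etc/clickhouse-server`)
    if err != nil {
        panic(err)
    }
    fmt.Println(len(args)) // 4
    fmt.Println(args)      // [grep -r max concurrent queries /etc/clickhouse-server]
}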
@ -1,107 +0,0 @@
package system_test

import (
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/stretchr/testify/require"
)

func TestCommandConfiguration(t *testing.T) {
    t.Run("correct configuration is returned for command collector", func(t *testing.T) {
        commandCollector := system.NewCommandCollector(&platform.ResourceManager{})
        conf := commandCollector.Configuration()
        require.Len(t, conf.Params, 1)
        require.IsType(t, config.StringParam{}, conf.Params[0])
        command, ok := conf.Params[0].(config.StringParam)
        require.True(t, ok)
        require.True(t, command.Required())
        require.Equal(t, command.Name(), "command")
        require.Equal(t, "", command.Value)
    })
}

func TestCommandCollect(t *testing.T) {
    t.Run("test simple command with args", func(t *testing.T) {
        commandCollector := system.NewCommandCollector(&platform.ResourceManager{})
        bundle, err := commandCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value:      "ls -l ../../../testdata",
                    Param:      config.NewParam("command", "Command to execute", true),
                    AllowEmpty: false,
                },
            },
        })
        require.Nil(t, err)
        require.Nil(t, bundle.Errors.Errors)
        require.Len(t, bundle.Frames, 1)
        require.Contains(t, bundle.Frames, "output")
        require.Equal(t, bundle.Frames["output"].Columns(), []string{"command", "stdout", "stderr", "error"})
        memFrame := bundle.Frames["output"].(data.MemoryFrame)
        values, ok, err := memFrame.Next()
        require.True(t, ok)
        require.Nil(t, err)
        require.Len(t, values, 4)
        require.Equal(t, "ls -l ../../../testdata", values[0])
        require.Contains(t, values[1], "configs")
        require.Contains(t, values[1], "docker")
        require.Contains(t, values[1], "log")
        require.Equal(t, "", values[2])
        require.Equal(t, "", values[3])
        values, ok, err = memFrame.Next()
        require.False(t, ok)
        require.Nil(t, err)
        require.Nil(t, values)
    })

    t.Run("test empty command", func(t *testing.T) {
        commandCollector := system.NewCommandCollector(&platform.ResourceManager{})
        bundle, err := commandCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value:      "",
                    Param:      config.NewParam("command", "Command to execute", true),
                    AllowEmpty: false,
                },
            },
        })
        require.Equal(t, "parameter command is invalid - command cannot be empty", err.Error())
        require.Equal(t, &data.DiagnosticBundle{}, bundle)
    })

    t.Run("test invalid command", func(t *testing.T) {
        commandCollector := system.NewCommandCollector(&platform.ResourceManager{})
        bundle, err := commandCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value:      "ls --invalid ../../../testdata",
                    Param:      config.NewParam("command", "Command to execute", true),
                    AllowEmpty: false,
                },
            },
        })
        // commands may error with output - we still capture on stderr
        require.Nil(t, err)
        require.Len(t, bundle.Errors.Errors, 1)
        require.Equal(t, "Unable to execute command: exit status 2", bundle.Errors.Errors[0].Error())
        require.Len(t, bundle.Frames, 1)
        require.Contains(t, bundle.Frames, "output")
        require.Equal(t, bundle.Frames["output"].Columns(), []string{"command", "stdout", "stderr", "error"})
        memFrame := bundle.Frames["output"].(data.MemoryFrame)
        values, ok, err := memFrame.Next()
        require.True(t, ok)
        require.Nil(t, err)
        require.Len(t, values, 4)
        require.Equal(t, "ls --invalid ../../../testdata", values[0])
        require.Equal(t, "", values[1])
        // exact values here may vary on platform
        require.NotEmpty(t, values[2])
        require.NotEmpty(t, values[3])
    })
}
@ -1,100 +0,0 @@
package system

import (
    "os"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/pkg/errors"
    "github.com/rs/zerolog/log"
    "github.com/yargevad/filepathx"
)

// This collector collects arbitrary user files

type FileCollector struct {
    resourceManager *platform.ResourceManager
}

func NewFileCollector(m *platform.ResourceManager) *FileCollector {
    return &FileCollector{
        resourceManager: m,
    }
}

func (f *FileCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
    conf, err := conf.ValidateConfig(f.Configuration())
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    filePattern, err := config.ReadStringValue(conf, "file_pattern")
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }

    var frameErrors []error
    // this util package supports recursive file matching e.g. /**/*
    matches, err := filepathx.Glob(filePattern)
    if err != nil {
        return &data.DiagnosticBundle{}, errors.Wrapf(err, "Invalid file_pattern \"%s\"", filePattern)
    }

    if len(matches) == 0 {
        frameErrors = append(frameErrors, errors.New("0 files match glob pattern"))
        return &data.DiagnosticBundle{
            Errors: data.FrameErrors{Errors: frameErrors},
        }, nil
    }

    var filePaths []string
    for _, match := range matches {
        fi, err := os.Stat(match)
        if err != nil {
            frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to read file %s", match))
            // fi is nil on error, so skip the match rather than dereference it below
            continue
        }
        if !fi.IsDir() {
            log.Debug().Msgf("Collecting file %s", match)
            filePaths = append(filePaths, match)
        }
    }

    frame := data.NewFileFrame("collection", filePaths)

    return &data.DiagnosticBundle{
        Errors: data.FrameErrors{Errors: frameErrors},
        Frames: map[string]data.Frame{
            "collection": frame,
        },
    }, nil
}

func (f *FileCollector) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{
            config.StringParam{
                Value:      "",
                Param:      config.NewParam("file_pattern", "Glob based pattern to specify files for collection", true),
                AllowEmpty: false,
            },
        },
    }
}

func (f *FileCollector) IsDefault() bool {
    return false
}

func (f *FileCollector) Description() string {
    return "Allows collection of user specified files"
}

// here we register the collector for use
func init() {
    collectors.Register("file", func() (collectors.Collector, error) {
        return &FileCollector{
            resourceManager: platform.GetResourceManager(),
        }, nil
    })
}
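The standard library's filepath.Glob cannot cross directory boundaries with a single pattern; filepathx adds the ** globstar used by this collector. A minimal sketch, assuming an illustrative config directory that is not part of this repository:

package main

import (
    "fmt"

    "github.com/yargevad/filepathx"
)

func main() {
    // ** descends into subdirectories, unlike the standard filepath.Glob
    matches, err := filepathx.Glob("/etc/clickhouse-server/**/*.xml")
    if err != nil {
        panic(err)
    }
    for _, m := range matches {
        fmt.Println(m)
    }
}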
@ -1,110 +0,0 @@
package system_test

import (
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/stretchr/testify/require"
)

func TestFileConfiguration(t *testing.T) {
    t.Run("correct configuration is returned for file collector", func(t *testing.T) {
        fileCollector := system.NewFileCollector(&platform.ResourceManager{})
        conf := fileCollector.Configuration()
        require.Len(t, conf.Params, 1)
        require.IsType(t, config.StringParam{}, conf.Params[0])
        filePattern, ok := conf.Params[0].(config.StringParam)
        require.True(t, ok)
        require.True(t, filePattern.Required())
        require.Equal(t, filePattern.Name(), "file_pattern")
        require.Equal(t, "", filePattern.Value)
    })
}

func TestFileCollect(t *testing.T) {

    t.Run("test filter patterns work", func(t *testing.T) {
        fileCollector := system.NewFileCollector(&platform.ResourceManager{})
        bundle, err := fileCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value:      "../../../testdata/**/*.xml",
                    Param:      config.NewParam("file_pattern", "Glob based pattern to specify files for collection", true),
                    AllowEmpty: false,
                },
            },
        })
        require.Nil(t, err)
        require.Nil(t, bundle.Errors.Errors)
        checkFileBundle(t, bundle,
            []string{"../../../testdata/configs/include/xml/server-include.xml",
                "../../../testdata/configs/include/xml/user-include.xml",
                "../../../testdata/configs/xml/config.xml",
                "../../../testdata/configs/xml/users.xml",
                "../../../testdata/configs/xml/users.d/default-password.xml",
                "../../../testdata/configs/yandex_xml/config.xml",
                "../../../testdata/docker/admin.xml",
                "../../../testdata/docker/custom.xml"})
    })

    t.Run("invalid file patterns are detected", func(t *testing.T) {
        fileCollector := system.NewFileCollector(&platform.ResourceManager{})
        bundle, err := fileCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value:      "",
                    Param:      config.NewParam("file_pattern", "Glob based pattern to specify files for collection", true),
                    AllowEmpty: false,
                },
            },
        })
        require.NotNil(t, err)
        require.Equal(t, "parameter file_pattern is invalid - file_pattern cannot be empty", err.Error())
        require.Equal(t, &data.DiagnosticBundle{}, bundle)
    })

    t.Run("check empty matches are reported", func(t *testing.T) {
        fileCollector := system.NewFileCollector(&platform.ResourceManager{})
        bundle, err := fileCollector.Collect(config.Configuration{
            Params: []config.ConfigParam{
                config.StringParam{
                    Value:      "../../../testdata/**/*.random",
                    Param:      config.NewParam("file_pattern", "Glob based pattern to specify files for collection", true),
                    AllowEmpty: false,
                },
            },
        })
        require.Nil(t, err)
        require.Nil(t, bundle.Frames)
        require.Len(t, bundle.Errors.Errors, 1)
        require.Equal(t, "0 files match glob pattern", bundle.Errors.Errors[0].Error())
    })

}

func checkFileBundle(t *testing.T, bundle *data.DiagnosticBundle, expectedFiles []string) {
    require.NotNil(t, bundle)
    require.Nil(t, bundle.Errors.Errors)
    require.Len(t, bundle.Frames, 1)
    require.Contains(t, bundle.Frames, "collection")
    dirFrame, ok := bundle.Frames["collection"].(data.FileFrame)
    require.True(t, ok)
    require.Equal(t, []string{"files"}, dirFrame.Columns())
    i := 0
    for {
        values, ok, err := dirFrame.Next()
        require.Nil(t, err)
        if !ok {
            break
        }
        require.Len(t, values, 1)
        file, ok := values[0].(data.SimpleFile)
        require.True(t, ok)
        require.Contains(t, expectedFiles, file.FilePath())
        i++
    }
    require.Equal(t, len(expectedFiles), i)
}
@ -1,235 +0,0 @@
package system

import (
    "strings"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/elastic/gosigar"
    "github.com/jaypipes/ghw"
    "github.com/matishsiao/goInfo"
    "github.com/pkg/errors"
)

// This collector collects the system overview

type SystemCollector struct {
    resourceManager *platform.ResourceManager
}

func NewSystemCollector(m *platform.ResourceManager) *SystemCollector {
    return &SystemCollector{
        resourceManager: m,
    }
}

func (sc *SystemCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
    _, err := conf.ValidateConfig(sc.Configuration())
    if err != nil {
        return &data.DiagnosticBundle{}, err
    }
    frames := make(map[string]data.Frame)
    var frameErrors []error

    frameErrors = addStatsToFrame(frames, frameErrors, "disks", getDisk)
    frameErrors = addStatsToFrame(frames, frameErrors, "disk_usage", getDiskUsage)

    frameErrors = addStatsToFrame(frames, frameErrors, "memory", getMemory)
    frameErrors = addStatsToFrame(frames, frameErrors, "memory_usage", getMemoryUsage)

    frameErrors = addStatsToFrame(frames, frameErrors, "cpu", getCPU)
    //frameErrors = addStatsToFrame(frames, frameErrors, "cpu_usage", getCPUUsage)

    frameErrors = addStatsToFrame(frames, frameErrors, "processes", getProcessList)

    frameErrors = addStatsToFrame(frames, frameErrors, "os", getHostDetails)

    // err is always nil at this point - any validation failure returned early above
    return &data.DiagnosticBundle{
        Frames: frames,
        Errors: data.FrameErrors{
            Errors: frameErrors,
        },
    }, nil
}

func addStatsToFrame(frames map[string]data.Frame, errors []error, name string, statFunc func() (data.MemoryFrame, error)) []error {
    frame, err := statFunc()
    if err != nil {
        errors = append(errors, err)
    }
    frames[name] = frame
    return errors
}

func (sc *SystemCollector) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{},
    }
}

func (sc *SystemCollector) IsDefault() bool {
    return true
}

func getDisk() (data.MemoryFrame, error) {
    block, err := ghw.Block()
    if err != nil {
        return data.MemoryFrame{}, errors.Wrapf(err, "unable to list block storage")
    }
    var rows [][]interface{}
    columns := []string{"name", "size", "physicalBlockSize", "driveType", "controller", "vendor", "model", "partitionName", "partitionSize", "mountPoint", "readOnly"}
    for _, disk := range block.Disks {
        for _, part := range disk.Partitions {
            rows = append(rows, []interface{}{disk.Name, disk.SizeBytes, disk.PhysicalBlockSizeBytes, disk.DriveType, disk.StorageController, disk.Vendor, disk.Model, part.Name, part.SizeBytes, part.MountPoint, part.IsReadOnly})
        }
    }
    return data.NewMemoryFrame("disks", columns, rows), nil
}

func getDiskUsage() (data.MemoryFrame, error) {
    fsList := gosigar.FileSystemList{}
    err := fsList.Get()
    if err != nil {
        return data.MemoryFrame{}, errors.Wrapf(err, "unable to list filesystems for usage")
    }
    rows := make([][]interface{}, len(fsList.List))
    columns := []string{"filesystem", "size", "used", "avail", "use%", "mounted on"}
    for i, fs := range fsList.List {
        dirName := fs.DirName
        usage := gosigar.FileSystemUsage{}
        err = usage.Get(dirName)
        if err == nil {
            rows[i] = []interface{}{fs.DevName, usage.Total, usage.Used, usage.Avail, usage.UsePercent(), dirName}
        } else {
            // we try to output something
            rows[i] = []interface{}{fs.DevName, 0, 0, 0, 0, dirName}
        }
    }
    return data.NewMemoryFrame("disk_usage", columns, rows), nil
}

func getMemory() (data.MemoryFrame, error) {
    memory, err := ghw.Memory()
    if err != nil {
        return data.MemoryFrame{}, errors.Wrapf(err, "unable to read memory")
    }
    columns := []string{"totalPhysical", "totalUsable", "supportedPageSizes"}
    rows := make([][]interface{}, 1)
    rows[0] = []interface{}{memory.TotalPhysicalBytes, memory.TotalUsableBytes, memory.SupportedPageSizes}
    return data.NewMemoryFrame("memory", columns, rows), nil
}

func getMemoryUsage() (data.MemoryFrame, error) {
    mem := gosigar.Mem{}
    swap := gosigar.Swap{}

    err := mem.Get()
    if err != nil {
        return data.MemoryFrame{}, errors.Wrapf(err, "unable to read memory usage")
    }

    err = swap.Get()
    if err != nil {
        return data.MemoryFrame{}, errors.Wrapf(err, "unable to read swap")
    }

    columns := []string{"type", "total", "used", "free"}
    rows := make([][]interface{}, 3)

    rows[0] = []interface{}{"mem", mem.Total, mem.Used, mem.Free}
    rows[1] = []interface{}{"buffers/cache", 0, mem.ActualUsed, mem.ActualFree}
    rows[2] = []interface{}{"swap", swap.Total, swap.Used, swap.Free}
    return data.NewMemoryFrame("memory_usage", columns, rows), nil
}

func getCPU() (data.MemoryFrame, error) {
    cpu, err := ghw.CPU()
    if err != nil {
        return data.MemoryFrame{}, errors.Wrapf(err, "unable to list cpus")
    }
    columns := []string{"processor", "vendor", "model", "core", "numThreads", "logical", "capabilities"}
    var rows [][]interface{}
    for _, proc := range cpu.Processors {
        for _, core := range proc.Cores {
            rows = append(rows, []interface{}{proc.ID, proc.Vendor, proc.Model, core.ID, core.NumThreads, core.LogicalProcessors, strings.Join(proc.Capabilities, " ")})
        }
    }
    return data.NewMemoryFrame("cpu", columns, rows), nil
}

// this gets cpu usage, as opposed to the listing of architecture etc. in getCPU(). It needs successive samples, as the values are ticks - not currently used
// see https://github.com/elastic/beats/blob/master/metricbeat/internal/metrics/cpu/metrics.go#L131 for inspiration
//nolint
func getCPUUsage() (data.MemoryFrame, error) {
    cpuList := gosigar.CpuList{}
    err := cpuList.Get()
    if err != nil {
        return data.MemoryFrame{}, errors.Wrapf(err, "unable to list cpus for usage")
    }
    columns := []string{"sys", "nice", "stolen", "irq", "idle", "softIrq", "user", "wait", "total"}
    rows := make([][]interface{}, len(cpuList.List))
    for i, cpu := range cpuList.List {
        rows[i] = []interface{}{cpu.Sys, cpu.Nice, cpu.Stolen, cpu.Irq, cpu.Idle, cpu.SoftIrq, cpu.User, cpu.Wait, cpu.Total()}
    }
    return data.NewMemoryFrame("cpu_usage", columns, rows), nil
}

func getProcessList() (data.MemoryFrame, error) {
    pidList := gosigar.ProcList{}
    err := pidList.Get()
    if err != nil {
        return data.MemoryFrame{}, errors.Wrapf(err, "unable to list processes")
    }
    columns := []string{"pid", "ppid", "stime", "time", "rss", "size", "faults", "minorFaults", "majorFaults", "user", "state", "priority", "nice", "command"}
    // append rather than index by position - pids we can't inspect are skipped, and indexing would leave nil rows behind
    var rows [][]interface{}
    for _, pid := range pidList.List {
        state := gosigar.ProcState{}
        mem := gosigar.ProcMem{}
        time := gosigar.ProcTime{}
        args := gosigar.ProcArgs{}
        if err := state.Get(pid); err != nil {
            continue
        }
        if err := mem.Get(pid); err != nil {
            continue
        }
        if err := time.Get(pid); err != nil {
            continue
        }
        if err := args.Get(pid); err != nil {
            continue
        }
        rows = append(rows, []interface{}{pid, state.Ppid, time.FormatStartTime(), time.FormatTotal(), mem.Resident, mem.Size,
            mem.PageFaults, mem.MinorFaults, mem.MajorFaults, state.Username, state.State, state.Priority, state.Nice,
            strings.Join(args.List, " ")})
    }
    return data.NewMemoryFrame("process_list", columns, rows), nil
}

func getHostDetails() (data.MemoryFrame, error) {
    gi, err := goInfo.GetInfo()
    if err != nil {
        return data.MemoryFrame{}, errors.Wrapf(err, "unable to get host summary")
    }
    columns := []string{"hostname", "os", "goOs", "cpus", "core", "kernel", "platform"}
    rows := [][]interface{}{
        {gi.Hostname, gi.OS, gi.GoOS, gi.CPUs, gi.Core, gi.Kernel, gi.Platform},
    }
    return data.NewMemoryFrame("os", columns, rows), nil
}

func (sc *SystemCollector) Description() string {
    return "Collects summary OS and hardware statistics for the host"
}

// here we register the collector for use
func init() {
    collectors.Register("system", func() (collectors.Collector, error) {
        return &SystemCollector{
            resourceManager: platform.GetResourceManager(),
        }, nil
    })
}
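Several of the stat functions above follow the same shape: populate a gosigar struct via Get(), then flatten its fields into a MemoryFrame. A standalone sketch of the gosigar half - the field names come from the library, while the output format here is purely illustrative:

package main

import (
    "fmt"

    "github.com/elastic/gosigar"
)

func main() {
    // Get() fills the struct in place and returns an error on failure
    mem := gosigar.Mem{}
    if err := mem.Get(); err != nil {
        panic(err)
    }
    fmt.Printf("total=%d used=%d free=%d\n", mem.Total, mem.Used, mem.Free)
}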
@ -1,89 +0,0 @@
package system_test

import (
    "testing"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/stretchr/testify/require"
)

func TestSystemConfiguration(t *testing.T) {
    t.Run("correct configuration is returned for system collector", func(t *testing.T) {
        systemCollector := system.NewSystemCollector(&platform.ResourceManager{})
        conf := systemCollector.Configuration()
        require.Len(t, conf.Params, 0)
        require.Equal(t, []config.ConfigParam{}, conf.Params)
    })
}

func TestSystemCollect(t *testing.T) {
    t.Run("test default system collection", func(t *testing.T) {
        systemCollector := system.NewSystemCollector(&platform.ResourceManager{})
        diagSet, err := systemCollector.Collect(config.Configuration{})
        require.Nil(t, err)
        require.NotNil(t, diagSet)
        require.Len(t, diagSet.Errors.Errors, 0)
        require.Len(t, diagSet.Frames, 7)
        require.Contains(t, diagSet.Frames, "disks")
        require.Contains(t, diagSet.Frames, "disk_usage")
        require.Contains(t, diagSet.Frames, "memory")
        require.Contains(t, diagSet.Frames, "memory_usage")
        require.Contains(t, diagSet.Frames, "cpu")
        require.Contains(t, diagSet.Frames, "processes")
        require.Contains(t, diagSet.Frames, "os")
        // responses here will vary depending on platform - mocking seems excessive so we test we have some data
        // disks
        require.Equal(t, []string{"name", "size", "physicalBlockSize", "driveType", "controller", "vendor", "model", "partitionName", "partitionSize", "mountPoint", "readOnly"}, diagSet.Frames["disks"].Columns())
        diskFrames, err := countFrameRows(diagSet, "disks")
        require.Greater(t, diskFrames, 0)
        require.Nil(t, err)
        // disk usage
        require.Equal(t, []string{"filesystem", "size", "used", "avail", "use%", "mounted on"}, diagSet.Frames["disk_usage"].Columns())
        diskUsageFrames, err := countFrameRows(diagSet, "disk_usage")
        require.Greater(t, diskUsageFrames, 0)
        require.Nil(t, err)
        // memory
        require.Equal(t, []string{"totalPhysical", "totalUsable", "supportedPageSizes"}, diagSet.Frames["memory"].Columns())
        memoryFrames, err := countFrameRows(diagSet, "memory")
        require.Greater(t, memoryFrames, 0)
        require.Nil(t, err)
        // memory_usage
        require.Equal(t, []string{"type", "total", "used", "free"}, diagSet.Frames["memory_usage"].Columns())
        memoryUsageFrames, err := countFrameRows(diagSet, "memory_usage")
        require.Greater(t, memoryUsageFrames, 0)
        require.Nil(t, err)
        // cpu
        require.Equal(t, []string{"processor", "vendor", "model", "core", "numThreads", "logical", "capabilities"}, diagSet.Frames["cpu"].Columns())
        cpuFrames, err := countFrameRows(diagSet, "cpu")
        require.Greater(t, cpuFrames, 0)
        require.Nil(t, err)
        // processes
        require.Equal(t, []string{"pid", "ppid", "stime", "time", "rss", "size", "faults", "minorFaults", "majorFaults", "user", "state", "priority", "nice", "command"}, diagSet.Frames["processes"].Columns())
        processesFrames, err := countFrameRows(diagSet, "processes")
        require.Greater(t, processesFrames, 0)
        require.Nil(t, err)
        // os
        require.Equal(t, []string{"hostname", "os", "goOs", "cpus", "core", "kernel", "platform"}, diagSet.Frames["os"].Columns())
        osFrames, err := countFrameRows(diagSet, "os")
        require.Greater(t, osFrames, 0)
        require.Nil(t, err)
    })
}

func countFrameRows(diagSet *data.DiagnosticBundle, frameName string) (int, error) {
    frame := diagSet.Frames[frameName]
    i := 0
    for {
        _, ok, err := frame.Next()
        if err != nil {
            return i, err
        }
        if !ok {
            return i, nil
        }
        i++
    }
}
@ -1,344 +0,0 @@
package file

import (
    "context"
    "encoding/csv"
    "fmt"
    "os"
    "path"
    "path/filepath"
    "strconv"
    "strings"

    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
    "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
    "github.com/mholt/archiver/v4"
    "github.com/pkg/errors"
    "github.com/rs/zerolog/log"
)

const OutputName = "simple"

type SubFolderGenerator func() string

type SimpleOutput struct {
    // mainly used for testing to make the sub folder deterministic - which it won't be by default, as it uses a timestamp
    FolderGenerator SubFolderGenerator
}

func (o SimpleOutput) Write(id string, bundles map[string]*data.DiagnosticBundle, conf config.Configuration) (data.FrameErrors, error) {
    conf, err := conf.ValidateConfig(o.Configuration())
    if err != nil {
        return data.FrameErrors{}, err
    }
    directory, err := config.ReadStringValue(conf, "directory")
    if err != nil {
        return data.FrameErrors{}, err
    }
    directory, err = getWorkingDirectory(directory)
    if err != nil {
        return data.FrameErrors{}, err
    }
    subFolder := strconv.FormatInt(utils.MakeTimestamp(), 10)
    if o.FolderGenerator != nil {
        subFolder = o.FolderGenerator()
    }
    skipArchive, err := config.ReadBoolValue(conf, "skip_archive")
    if err != nil {
        return data.FrameErrors{}, err
    }

    outputDir := filepath.Join(directory, id, subFolder)
    log.Info().Msgf("creating bundle in %s", outputDir)
    if err := os.MkdirAll(outputDir, os.ModePerm); err != nil {
        return data.FrameErrors{}, err
    }
    frameErrors := data.FrameErrors{}
    var filePaths []string
    for name := range bundles {
        bundlePaths, frameError := writeDiagnosticBundle(name, bundles[name], outputDir)
        filePaths = append(filePaths, bundlePaths...)
        frameErrors.Errors = append(frameErrors.Errors, frameError.Errors...)
    }
    log.Info().Msg("bundle created")
    if !skipArchive {
        archiveFilename := filepath.Join(directory, id, fmt.Sprintf("%s.tar.gz", subFolder))
        log.Info().Msgf("compressing bundle to %s", archiveFilename)
        // produce a map from the input paths to the archive paths - we preserve the output directory and hierarchy
        archiveMap := createArchiveMap(filePaths, directory)
        if err := createArchive(archiveFilename, archiveMap); err != nil {
            return frameErrors, err
        }
        // we delete the original directory, leaving just the archive behind
        if err := os.RemoveAll(outputDir); err != nil {
            return frameErrors, err
        }
        log.Info().Msgf("archive ready at: %s", archiveFilename)
    }
    return frameErrors, nil
}

func writeDiagnosticBundle(name string, diag *data.DiagnosticBundle, baseDir string) ([]string, data.FrameErrors) {
    diagDir := filepath.Join(baseDir, name)
    if err := os.MkdirAll(diagDir, os.ModePerm); err != nil {
        return nil, data.FrameErrors{Errors: []error{
            errors.Wrapf(err, "unable to create directory for %s", name),
        }}
    }
    frameErrors := data.FrameErrors{}
    var filePaths []string
    for frameId, frame := range diag.Frames {
        fFilePath, errs := writeFrame(frameId, frame, diagDir)
        filePaths = append(filePaths, fFilePath...)
        if len(errs) > 0 {
            // it would be nice if we could wrap this list of errors into something formal, but this logs well
            frameErrors.Errors = append(frameErrors.Errors, fmt.Errorf("unable to write frame %s for %s", frameId, name))
            frameErrors.Errors = append(frameErrors.Errors, errs...)
        }
    }
    return filePaths, frameErrors
}

func writeFrame(frameId string, frame data.Frame, baseDir string) ([]string, []error) {
    switch f := frame.(type) {
    case data.DatabaseFrame:
        return writeDatabaseFrame(frameId, f, baseDir)
    case data.ConfigFileFrame:
        return writeConfigFrame(frameId, f, baseDir)
    case data.DirectoryFileFrame:
        return processDirectoryFileFrame(frameId, f, baseDir)
    case data.FileFrame:
        return processFileFrame(frameId, f, baseDir)
    case data.HierarchicalFrame:
        return writeHierarchicalFrame(frameId, f, baseDir)
    default:
        // for now our data frame writer supports all frames
        return writeDatabaseFrame(frameId, frame, baseDir)
    }
}

func writeHierarchicalFrame(frameId string, frame data.HierarchicalFrame, baseDir string) ([]string, []error) {
    filePaths, errs := writeFrame(frameId, frame.DataFrame, baseDir)
    for _, subFrame := range frame.SubFrames {
        subDir := filepath.Join(baseDir, subFrame.Name())
        if err := os.MkdirAll(subDir, os.ModePerm); err != nil {
            errs = append(errs, err)
            continue
        }
        subPaths, subErrs := writeFrame(subFrame.Name(), subFrame, subDir)
        filePaths = append(filePaths, subPaths...)
        errs = append(errs, subErrs...)
    }
    return filePaths, errs
}

func writeDatabaseFrame(frameId string, frame data.Frame, baseDir string) ([]string, []error) {
    frameFilePath := filepath.Join(baseDir, fmt.Sprintf("%s.csv", frameId))
    var errs []error
    f, err := os.Create(frameFilePath)
    if err != nil {
        errs = append(errs, errors.Wrapf(err, "unable to create directory for frame %s", frameId))
        return []string{}, errs
    }
    defer f.Close()
    w := csv.NewWriter(f)
    defer w.Flush()
    if err := w.Write(frame.Columns()); err != nil {
        errs = append(errs, errors.Wrapf(err, "unable to write columns for frame %s", frameId))
        return []string{}, errs
    }
    // unlike configs and logs, we don't collect an error for every row here - that could mean a lot of unnecessary noise
    for {
        values, ok, err := frame.Next()
        if err != nil {
            errs = append(errs, errors.Wrapf(err, "unable to read frame %s", frameId))
            return []string{}, errs
        }
        if !ok {
            return []string{frameFilePath}, errs
        }
        sValues := make([]string, len(values))
        for i, value := range values {
            sValues[i] = fmt.Sprintf("%v", value)
        }
        if err := w.Write(sValues); err != nil {
            errs = append(errs, errors.Wrapf(err, "unable to write row for frame %s", frameId))
            return []string{}, errs
        }
    }
}

func writeConfigFrame(frameId string, frame data.ConfigFileFrame, baseDir string) ([]string, []error) {
    var errs []error
    frameDirectory := filepath.Join(baseDir, frameId)
    if err := os.MkdirAll(frameDirectory, os.ModePerm); err != nil {
        errs = append(errs, errors.Wrapf(err, "unable to create directory for frame %s", frameId))
        return []string{}, errs
    }
    // this holds our included files
    includesDirectory := filepath.Join(frameDirectory, "includes")
    if err := os.MkdirAll(includesDirectory, os.ModePerm); err != nil {
        errs = append(errs, errors.Wrapf(err, "unable to create includes directory for frame %s", frameId))
        return []string{}, errs
    }
    for {
        values, ok, err := frame.Next()
        if err != nil {
            errs = append(errs, err)
            return []string{frameDirectory}, errs
        }
        if !ok {
            return []string{frameDirectory}, errs
        }
        configFile := values[0].(data.ConfigFile)
        if !configFile.IsIncluded() {
            relPath := strings.TrimPrefix(configFile.FilePath(), frame.Directory)
            destPath := path.Join(frameDirectory, relPath)
            if err = configFile.Copy(destPath, true); err != nil {
                errs = append(errs, errors.Wrapf(err, "Unable to copy file %s", configFile.FilePath()))
            }
        } else {
            // include files could be anywhere - potentially multiple with the same name. We thus recreate the directory
            // hierarchy under includes to avoid collisions
            destPath := path.Join(includesDirectory, configFile.FilePath())
            if err = configFile.Copy(destPath, true); err != nil {
                errs = append(errs, errors.Wrapf(err, "Unable to copy file %s", configFile.FilePath()))
            }
        }
    }
}

func processDirectoryFileFrame(frameId string, frame data.DirectoryFileFrame, baseDir string) ([]string, []error) {
    var errs []error
    // each set of files goes under its own directory to preserve grouping
    frameDirectory := filepath.Join(baseDir, frameId)
    if err := os.MkdirAll(frameDirectory, os.ModePerm); err != nil {
        errs = append(errs, errors.Wrapf(err, "unable to create directory for frame %s", frameId))
        return []string{}, errs
    }
    for {
        values, ok, err := frame.Next()
        if err != nil {
            errs = append(errs, err)
            return []string{frameDirectory}, errs
        }
        if !ok {
            return []string{frameDirectory}, errs
        }
        file := values[0].(data.SimpleFile)
        relPath := strings.TrimPrefix(file.FilePath(), frame.Directory)
        destPath := path.Join(frameDirectory, relPath)

        if err = file.Copy(destPath, true); err != nil {
            errs = append(errs, errors.Wrapf(err, "unable to copy file %s for frame %s", file, frameId))
        }
    }
}

func processFileFrame(frameId string, frame data.FileFrame, baseDir string) ([]string, []error) {
    var errs []error
    frameDirectory := filepath.Join(baseDir, frameId)
    if err := os.MkdirAll(frameDirectory, os.ModePerm); err != nil {
        errs = append(errs, errors.Wrapf(err, "unable to create directory for frame %s", frameId))
        return []string{}, errs
    }
    for {
        values, ok, err := frame.Next()
        if err != nil {
            errs = append(errs, err)
            // consistent with the other frame writers - stop on a read error rather than iterate a broken frame
            return []string{frameDirectory}, errs
        }
        if !ok {
            return []string{frameDirectory}, errs
        }
        file := values[0].(data.SimpleFile)
        // we need an absolute path to preserve the directory hierarchy
        dir, err := filepath.Abs(filepath.Dir(file.FilePath()))
        if err != nil {
            errs = append(errs, errors.Wrapf(err, "unable to determine dir for %s", file.FilePath()))
        }
        outputDir := filepath.Join(frameDirectory, dir)
        // create the directory only when missing, but copy in either case - otherwise files sharing a directory would be skipped
        if _, err := os.Stat(outputDir); os.IsNotExist(err) {
            if err := os.MkdirAll(outputDir, os.ModePerm); err != nil {
                errs = append(errs, errors.Wrapf(err, "unable to create directory for %s", file.FilePath()))
                continue
            }
        }
        outputPath := filepath.Join(outputDir, filepath.Base(file.FilePath()))
        if err := file.Copy(outputPath, false); err != nil {
            errs = append(errs, errors.Wrapf(err, "unable to copy file %s", file.FilePath()))
        }
    }
}

func getWorkingDirectory(path string) (string, error) {
    if !filepath.IsAbs(path) {
        workingPath, err := os.Getwd()
        if err != nil {
            return "", err
        }
        return filepath.Join(workingPath, path), nil
    }
    return path, nil
}

func createArchiveMap(filePaths []string, prefix string) map[string]string {
    archiveMap := make(map[string]string)
    for _, path := range filePaths {
        archiveMap[path] = strings.TrimPrefix(path, prefix)
    }
    return archiveMap
}

func createArchive(outputFile string, filePaths map[string]string) error {
    files, err := archiver.FilesFromDisk(nil, filePaths)
    if err != nil {
        return err
    }
    out, err := os.Create(outputFile)
    if err != nil {
        return err
    }
    defer out.Close()
    format := archiver.CompressedArchive{
        Compression: archiver.Gz{},
        Archival:    archiver.Tar{},
    }
    err = format.Archive(context.Background(), out, files)
    return err
}

func (o SimpleOutput) Configuration() config.Configuration {
    return config.Configuration{
        Params: []config.ConfigParam{
            config.StringParam{
                Value: "./",
                Param: config.NewParam("directory", "Directory in which to create dump. Defaults to the current directory.", false),
            },
            config.StringOptions{
                Value: "csv",
                // TODO: add tsv and others here later
                Options: []string{"csv"},
                Param:   config.NewParam("format", "Format of exported files", false),
            },
            config.BoolParam{
                Value: false,
                Param: config.NewParam("skip_archive", "Don't compress output to an archive", false),
            },
        },
    }
}

func (o SimpleOutput) Description() string {
    return "Writes out the diagnostic bundle as files in a structured directory, optionally producing a compressed archive."
}

// here we register the output for use
func init() {
    outputs.Register(OutputName, func() (outputs.Output, error) {
        return SimpleOutput{}, nil
    })
}
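The archive map built by createArchiveMap is simply disk path -> archive-relative path, with the configured output directory trimmed so the tarball reproduces the bundle hierarchy. A stdlib-only sketch of that mapping - the paths shown are illustrative assumptions:

package main

import (
    "fmt"
    "strings"
)

func main() {
    // mirrors createArchiveMap above: disk paths map to archive-relative paths
    filePaths := []string{"/tmp/out/test/systemA/disk.csv", "/tmp/out/test/systemA/cluster.csv"}
    prefix := "/tmp/out"
    archiveMap := make(map[string]string)
    for _, p := range filePaths {
        archiveMap[p] = strings.TrimPrefix(p, prefix)
    }
    fmt.Println(archiveMap) // map[/tmp/out/test/systemA/cluster.csv:/test/systemA/cluster.csv ...]
}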
@@ -1,468 +0,0 @@

package file_test

import (
	"bufio"
	"encoding/xml"
	"fmt"
	"io"
	"os"
	"path"
	"strings"
	"testing"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/file"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
	"github.com/stretchr/testify/require"
)

var clusterFrame = test.NewFakeDataFrame("clusters", []string{"cluster", "shard_num", "shard_weight", "replica_num", "host_name", "host_address", "port", "is_local", "user", "default_database", "errors_count", "slowdowns_count", "estimated_recovery_time"},
	[][]interface{}{
		{"events", 1, 1, 1, "dalem-local-clickhouse-blue-1", "192.168.144.2", 9000, 1, "default", "", 0, 0, 0},
		{"events", 2, 1, 1, "dalem-local-clickhouse-blue-2", "192.168.144.4", 9001, 1, "default", "", 0, 0, 0},
		{"events", 3, 1, 1, "dalem-local-clickhouse-blue-3", "192.168.144.3", 9002, 1, "default", "", 0, 0, 0},
	},
)

var diskFrame = test.NewFakeDataFrame("disks", []string{"name", "path", "free_space", "total_space", "keep_free_space", "type"},
	[][]interface{}{
		{"default", "/var/lib/clickhouse", 1729659346944, 1938213220352, "", "local"},
	},
)

var userFrame = test.NewFakeDataFrame("users", []string{"name", "id", "storage", "auth_type", "auth_params", "host_ip", "host_names", "host_names_regexp", "host_names_like"},
	[][]interface{}{
		{"default", "94309d50-4f52-5250-31bd-74fecac179db,users.xml,plaintext_password", "sha256_password", []string{"::0"}, []string{}, []string{}, []string{}},
	},
)

func TestConfiguration(t *testing.T) {
	t.Run("correct configuration is returned", func(t *testing.T) {
		output := file.SimpleOutput{}
		conf := output.Configuration()
		require.Len(t, conf.Params, 3)
		// check first param - directory
		require.IsType(t, config.StringParam{}, conf.Params[0])
		directory, ok := conf.Params[0].(config.StringParam)
		require.True(t, ok)
		require.False(t, directory.Required())
		require.Equal(t, "directory", directory.Name())
		require.Equal(t, "./", directory.Value)
		// check second param - format
		require.IsType(t, config.StringOptions{}, conf.Params[1])
		format, ok := conf.Params[1].(config.StringOptions)
		require.True(t, ok)
		require.False(t, format.Required())
		require.Equal(t, "format", format.Name())
		require.Equal(t, "csv", format.Value)
		require.Equal(t, []string{"csv"}, format.Options)
		// check third param - skip_archive
		require.IsType(t, config.BoolParam{}, conf.Params[2])
		skipArchive, ok := conf.Params[2].(config.BoolParam)
		require.True(t, ok)
		require.False(t, skipArchive.Required())
		require.False(t, skipArchive.Value)
	})
}

func TestWrite(t *testing.T) {
	bundles := map[string]*data.DiagnosticBundle{
		"systemA": {
			Frames: map[string]data.Frame{
				"disk":    diskFrame,
				"cluster": clusterFrame,
			},
		},
		"systemB": {
			Frames: map[string]data.Frame{
				"user": userFrame,
			},
		},
	}
	t.Run("test we can write simple diagnostic sets", func(t *testing.T) {
		tempDir := t.TempDir()
		configuration := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Param: config.NewParam("directory", "A directory", true),
					Value: tempDir,
				},
				// turn compression off as the folder will be deleted by default
				config.BoolParam{
					Value: true,
					Param: config.NewParam("skip_archive", "Skip archive", false),
				},
			},
		}
		output := file.SimpleOutput{FolderGenerator: staticFolderName}
		frameErrors, err := output.Write("test", bundles, configuration)
		require.Nil(t, err)
		require.Equal(t, data.FrameErrors{}, frameErrors)
		clusterFile := path.Join(tempDir, "test", "test", "systemA", "cluster.csv")
		diskFile := path.Join(tempDir, "test", "test", "systemA", "disk.csv")
		userFile := path.Join(tempDir, "test", "test", "systemB", "user.csv")
		require.FileExists(t, clusterFile)
		require.FileExists(t, diskFile)
		require.FileExists(t, userFile)
		diskLines, err := readFileLines(diskFile)
		require.Nil(t, err)
		require.Len(t, diskLines, 2)
		usersLines, err := readFileLines(userFile)
		require.Nil(t, err)
		require.Len(t, usersLines, 2)
		clusterLines, err := readFileLines(clusterFile)
		require.Nil(t, err)
		require.Len(t, clusterLines, 4)
		require.Equal(t, strings.Join(clusterFrame.ColumnNames, ","), clusterLines[0])
		require.Equal(t, "events,1,1,1,dalem-local-clickhouse-blue-1,192.168.144.2,9000,1,default,,0,0,0", clusterLines[1])
		require.Equal(t, "events,2,1,1,dalem-local-clickhouse-blue-2,192.168.144.4,9001,1,default,,0,0,0", clusterLines[2])
		require.Equal(t, "events,3,1,1,dalem-local-clickhouse-blue-3,192.168.144.3,9002,1,default,,0,0,0", clusterLines[3])
		resetFrames()
	})

	t.Run("test invalid parameter", func(t *testing.T) {
		tempDir := t.TempDir()
		configuration := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Param: config.NewParam("directory", "A directory", true),
					Value: tempDir,
				},
				config.StringOptions{
					Value:   "random",
					Options: []string{"csv"},
					// TODO: add tsv and others here later
					Param: config.NewParam("format", "Format of exported files", false),
				},
				config.BoolParam{
					Value: true,
					Param: config.NewParam("skip_archive", "Skip compressed archive", false),
				},
			},
		}
		output := file.SimpleOutput{FolderGenerator: staticFolderName}
		frameErrors, err := output.Write("test", bundles, configuration)
		require.Equal(t, data.FrameErrors{}, frameErrors)
		require.NotNil(t, err)
		require.Equal(t, "parameter format is invalid - random is not a valid value for format - [csv]", err.Error())
		resetFrames()
	})

	t.Run("test compression", func(t *testing.T) {
		tempDir := t.TempDir()
		configuration := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Param: config.NewParam("directory", "A directory", true),
					Value: tempDir,
				},
			},
		}
		output := file.SimpleOutput{FolderGenerator: staticFolderName}
		frameErrors, err := output.Write("test", bundles, configuration)
		require.Nil(t, err)
		require.Equal(t, data.FrameErrors{}, frameErrors)
		archiveFileName := path.Join(tempDir, "test", "test.tar.gz")
		fi, err := os.Stat(archiveFileName)
		require.Nil(t, err)
		require.FileExists(t, archiveFileName)
		// compression will vary so let's test a range
		require.Greater(t, int64(600), fi.Size())
		require.Less(t, int64(200), fi.Size())
		outputFolder := path.Join(tempDir, "test", "test")
		// check the folder doesn't exist and is cleaned up
		require.NoFileExists(t, outputFolder)
		resetFrames()
	})

	t.Run("test support for directory frames", func(t *testing.T) {
		// create 5 temporary files
		tempDir := t.TempDir()
		files := createRandomFiles(tempDir, 5)
		dirFrame, errs := data.NewFileDirectoryFrame(tempDir, []string{"*.log"})
		require.Empty(t, errs)
		fileBundles := map[string]*data.DiagnosticBundle{
			"systemA": {
				Frames: map[string]data.Frame{
					"disk":    diskFrame,
					"cluster": clusterFrame,
				},
			},
			"config": {
				Frames: map[string]data.Frame{
					"logs": dirFrame,
				},
			},
		}
		destDir := t.TempDir()
		configuration := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Param: config.NewParam("directory", "A directory", true),
					Value: destDir,
				},
				// turn compression off as the folder will be deleted by default
				config.BoolParam{
					Value: true,
					Param: config.NewParam("skip_archive", "Skip archive", false),
				},
			},
		}
		output := file.SimpleOutput{FolderGenerator: staticFolderName}
		frameErrors, err := output.Write("test", fileBundles, configuration)
		require.Nil(t, err)
		require.NotNil(t, frameErrors)

		// test the usual frames still work
		clusterFile := path.Join(destDir, "test", "test", "systemA", "cluster.csv")
		diskFile := path.Join(destDir, "test", "test", "systemA", "disk.csv")
		require.FileExists(t, clusterFile)
		require.FileExists(t, diskFile)
		diskLines, err := readFileLines(diskFile)
		require.Nil(t, err)
		require.Len(t, diskLines, 2)
		clusterLines, err := readFileLines(clusterFile)
		require.Nil(t, err)
		require.Len(t, clusterLines, 4)
		require.Equal(t, strings.Join(clusterFrame.ColumnNames, ","), clusterLines[0])
		require.Equal(t, "events,1,1,1,dalem-local-clickhouse-blue-1,192.168.144.2,9000,1,default,,0,0,0", clusterLines[1])
		require.Equal(t, "events,2,1,1,dalem-local-clickhouse-blue-2,192.168.144.4,9001,1,default,,0,0,0", clusterLines[2])
		require.Equal(t, "events,3,1,1,dalem-local-clickhouse-blue-3,192.168.144.3,9002,1,default,,0,0,0", clusterLines[3])
		// test our directory frame
		for _, filepath := range files {
			// check they were copied
			subPath := strings.TrimPrefix(filepath, tempDir)
			// path here will be <destDir>/<id>/test/config/logs/<sub path>
			newPath := path.Join(destDir, "test", "test", "config", "logs", subPath)
			require.FileExists(t, newPath)
		}
		resetFrames()
	})

	t.Run("test support for config frames", func(t *testing.T) {
		xmlConfig := data.XmlConfig{
			XMLName: xml.Name{},
			Clickhouse: data.XmlLoggerConfig{
				XMLName:  xml.Name{},
				ErrorLog: "/var/log/clickhouse-server/clickhouse-server.err.log",
				Log:      "/var/log/clickhouse-server/clickhouse-server.log",
			},
			IncludeFrom: "",
		}
		tempDir := t.TempDir()
		confDir := path.Join(tempDir, "conf")
		// create an includes file
		includesDir := path.Join(tempDir, "includes")
		err := os.MkdirAll(includesDir, os.ModePerm)
		require.Nil(t, err)
		includesPath := path.Join(includesDir, "random.xml")
		includesFile, err := os.Create(includesPath)
		require.Nil(t, err)
		xmlWriter := io.Writer(includesFile)
		enc := xml.NewEncoder(xmlWriter)
		enc.Indent(" ", " ")
		err = enc.Encode(xmlConfig)
		require.Nil(t, err)
		// create 5 temporary config files
		files := make([]string, 5)
		// set the includes
		xmlConfig.IncludeFrom = includesPath
		for i := 0; i < 5; i++ {
			// we want to check hierarchies are preserved so create a simple folder for each file
			fileDir := path.Join(confDir, fmt.Sprintf("%d", i))
			err := os.MkdirAll(fileDir, os.ModePerm)
			require.Nil(t, err)
			filepath := path.Join(fileDir, fmt.Sprintf("random-%d.xml", i))
			files[i] = filepath
			xmlFile, err := os.Create(filepath)
			require.Nil(t, err)
			// write a little xml so it's valid
			xmlWriter := io.Writer(xmlFile)
			enc := xml.NewEncoder(xmlWriter)
			enc.Indent(" ", " ")
			err = enc.Encode(xmlConfig)
			require.Nil(t, err)
		}
		configFrame, errs := data.NewConfigFileFrame(confDir)
		require.Empty(t, errs)
		fileBundles := map[string]*data.DiagnosticBundle{
			"systemA": {
				Frames: map[string]data.Frame{
					"disk":    diskFrame,
					"cluster": clusterFrame,
				},
			},
			"config": {
				Frames: map[string]data.Frame{
					"user_specified": configFrame,
				},
			},
		}
		destDir := t.TempDir()
		configuration := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Param: config.NewParam("directory", "A directory", true),
					Value: destDir,
				},
				// turn compression off as the folder will be deleted by default
				config.BoolParam{
					Value: true,
					Param: config.NewParam("skip_archive", "Skip archive", false),
				},
			},
		}
		output := file.SimpleOutput{FolderGenerator: staticFolderName}
		frameErrors, err := output.Write("test", fileBundles, configuration)
		require.Nil(t, err)
		require.NotNil(t, frameErrors)
		require.Empty(t, frameErrors.Errors)
		// test our config frame
		for _, filepath := range files {
			// check they were copied
			subPath := strings.TrimPrefix(filepath, confDir)
			// path here will be <destDir>/<id>/test/config/user_specified/<sub path>
			newPath := path.Join(destDir, "test", "test", "config", "user_specified", subPath)
			require.FileExists(t, newPath)
		}
		// check our includes file exists
		// path here will be <destDir>/<id>/test/config/user_specified/includes/<includes path>
		require.FileExists(t, path.Join(destDir, "test", "test", "config", "user_specified", "includes", includesPath))
		resetFrames()
	})

	t.Run("test support for file frames", func(t *testing.T) {
		// create 5 temporary files
		tempDir := t.TempDir()
		files := createRandomFiles(tempDir, 5)
		fileFrame := data.NewFileFrame("collection", files)
		fileBundles := map[string]*data.DiagnosticBundle{
			"systemA": {
				Frames: map[string]data.Frame{
					"disk":    diskFrame,
					"cluster": clusterFrame,
				},
			},
			"file": {
				Frames: map[string]data.Frame{
					"collection": fileFrame,
				},
			},
		}
		destDir := t.TempDir()
		configuration := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Param: config.NewParam("directory", "A directory", true),
					Value: destDir,
				},
				// turn compression off as the folder will be deleted by default
				config.BoolParam{
					Value: true,
					Param: config.NewParam("skip_archive", "Skip archive", false),
				},
			},
		}
		output := file.SimpleOutput{FolderGenerator: staticFolderName}
		frameErrors, err := output.Write("test", fileBundles, configuration)
		require.Nil(t, err)
		require.NotNil(t, frameErrors)
		// test our file frame
		for _, filepath := range files {
			// path here will be <destDir>/<id>/test/file/collection/<sub path>
			newPath := path.Join(destDir, "test", "test", "file", "collection", filepath)
			require.FileExists(t, newPath)
		}
		resetFrames()
	})

	t.Run("test support for hierarchical frames", func(t *testing.T) {
		bottomFrame := data.NewHierarchicalFrame("bottomLevel", userFrame, []data.HierarchicalFrame{})
		middleFrame := data.NewHierarchicalFrame("middleLevel", diskFrame, []data.HierarchicalFrame{bottomFrame})
		topFrame := data.NewHierarchicalFrame("topLevel", clusterFrame, []data.HierarchicalFrame{middleFrame})
		tempDir := t.TempDir()
		configuration := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Param: config.NewParam("directory", "A directory", true),
					Value: tempDir,
				},
				// turn compression off as the folder will be deleted by default
				config.BoolParam{
					Value: true,
					Param: config.NewParam("skip_archive", "Skip archive", false),
				},
			},
		}
		output := file.SimpleOutput{FolderGenerator: staticFolderName}
		hierarchicalBundle := map[string]*data.DiagnosticBundle{
			"systemA": {
				Frames: map[string]data.Frame{
					"topLevel": topFrame,
				},
			},
		}
		frameErrors, err := output.Write("test", hierarchicalBundle, configuration)
		require.Nil(t, err)
		require.Equal(t, data.FrameErrors{}, frameErrors)
		topFile := path.Join(tempDir, "test", "test", "systemA", "topLevel.csv")
		middleFile := path.Join(tempDir, "test", "test", "systemA", "middleLevel", "middleLevel.csv")
		bottomFile := path.Join(tempDir, "test", "test", "systemA", "middleLevel", "bottomLevel", "bottomLevel.csv")
		require.FileExists(t, topFile)
		require.FileExists(t, middleFile)
		require.FileExists(t, bottomFile)
		topLines, err := readFileLines(topFile)
		require.Nil(t, err)
		require.Len(t, topLines, 4)
		middleLines, err := readFileLines(middleFile)
		require.Nil(t, err)
		require.Len(t, middleLines, 2)
		bottomLines, err := readFileLines(bottomFile)
		require.Nil(t, err)
		require.Len(t, bottomLines, 2)
		require.Equal(t, strings.Join(clusterFrame.ColumnNames, ","), topLines[0])
		require.Equal(t, "events,1,1,1,dalem-local-clickhouse-blue-1,192.168.144.2,9000,1,default,,0,0,0", topLines[1])
		require.Equal(t, "events,2,1,1,dalem-local-clickhouse-blue-2,192.168.144.4,9001,1,default,,0,0,0", topLines[2])
		require.Equal(t, "events,3,1,1,dalem-local-clickhouse-blue-3,192.168.144.3,9002,1,default,,0,0,0", topLines[3])
		resetFrames()
	})
}

func createRandomFiles(tempDir string, num int) []string {
	files := make([]string, num)
	for i := 0; i < num; i++ {
		// we want to check hierarchies are preserved so create a simple folder for each file
		fileDir := path.Join(tempDir, fmt.Sprintf("%d", i))
		os.MkdirAll(fileDir, os.ModePerm) //nolint:errcheck
		filepath := path.Join(fileDir, fmt.Sprintf("random-%d.log", i))
		files[i] = filepath
		os.Create(filepath) //nolint:errcheck
	}
	return files
}

func resetFrames() {
	clusterFrame.Reset()
	userFrame.Reset()
	diskFrame.Reset()
}

func readFileLines(filename string) ([]string, error) {
	file, err := os.Open(filename)
	if err != nil {
		return nil, err
	}
	defer file.Close()

	var lines []string
	scanner := bufio.NewScanner(file)
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}
	return lines, scanner.Err()
}

func staticFolderName() string {
	return "test"
}
@@ -1,67 +0,0 @@

package outputs

import (
	"fmt"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
	"github.com/pkg/errors"
	"github.com/rs/zerolog/log"
)

type Output interface {
	Write(id string, bundles map[string]*data.DiagnosticBundle, config config.Configuration) (data.FrameErrors, error)
	Configuration() config.Configuration
	Description() string
	// TODO: we will need to implement this for the convert function
	//Read(config config.Configuration) (data.DiagnosticBundle, error)
}

// Register can be called from init() on an output in this package
// It will automatically be added to the Outputs map to be called externally
func Register(name string, output OutputFactory) {
	// names must be unique
	if _, ok := Outputs[name]; ok {
		log.Error().Msgf("More than 1 output is trying to register under the name %s. Names must be unique.", name)
	}
	Outputs[name] = output
}

// OutputFactory lets us use a closure to get instances of the output struct
type OutputFactory func() (Output, error)

var Outputs = map[string]OutputFactory{}

func GetOutputNames() []string {
	outputs := make([]string, len(Outputs))
	i := 0
	for k := range Outputs {
		outputs[i] = k
		i++
	}
	return outputs
}

func GetOutputByName(name string) (Output, error) {
	if outputFactory, ok := Outputs[name]; ok {
		output, err := outputFactory()
		if err != nil {
			return nil, errors.Wrapf(err, "output %s could not be initialized", name)
		}
		return output, nil
	}
	return nil, fmt.Errorf("%s is not a valid output name", name)
}

func BuildConfigurationOptions() (map[string]config.Configuration, error) {
	configurations := make(map[string]config.Configuration)
	for name, outputFactory := range Outputs {
		output, err := outputFactory()
		if err != nil {
			return nil, errors.Wrapf(err, "output %s could not be initialized", name)
		}
		configurations[name] = output.Configuration()
	}
	return configurations, nil
}
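The Register/OutputFactory pair above is the extension point for new outputs. A hedged sketch of how a hypothetical additional output would plug in follows; the "noop" name and NoopOutput type are illustrative only and are not part of this diff.

```go
package noop

import (
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
)

type NoopOutput struct{}

// Write satisfies outputs.Output but simply discards the bundles.
func (n NoopOutput) Write(id string, bundles map[string]*data.DiagnosticBundle, conf config.Configuration) (data.FrameErrors, error) {
	return data.FrameErrors{}, nil
}

func (n NoopOutput) Configuration() config.Configuration {
	return config.Configuration{}
}

func (n NoopOutput) Description() string {
	return "Discards the diagnostic bundle."
}

// importing this package for side effects makes "noop" resolvable by name
func init() {
	outputs.Register("noop", func() (outputs.Output, error) {
		return NoopOutput{}, nil
	})
}
```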
@@ -1,45 +0,0 @@

package outputs_test

import (
	"testing"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/file"
	_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/terminal"
	"github.com/stretchr/testify/require"
)

func TestGetOutputNames(t *testing.T) {
	t.Run("can get all output names", func(t *testing.T) {
		outputNames := outputs.GetOutputNames()
		require.ElementsMatch(t, []string{"simple", "report"}, outputNames)
	})
}

func TestGetOutputByName(t *testing.T) {
	t.Run("can get output by name", func(t *testing.T) {
		output, err := outputs.GetOutputByName("simple")
		require.Nil(t, err)
		require.Equal(t, file.SimpleOutput{}, output)
	})

	t.Run("fails on non existing output", func(t *testing.T) {
		output, err := outputs.GetOutputByName("random")
		require.NotNil(t, err)
		require.Equal(t, "random is not a valid output name", err.Error())
		require.Nil(t, output)
	})
}

func TestBuildConfigurationOptions(t *testing.T) {
	t.Run("can get all output configurations", func(t *testing.T) {
		outputs, err := outputs.BuildConfigurationOptions()
		require.Nil(t, err)
		require.Len(t, outputs, 2)
		require.Contains(t, outputs, "simple")
		require.Contains(t, outputs, "report")
	})
}
@@ -1,284 +0,0 @@

package terminal

import (
	"bufio"
	"fmt"
	"os"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
	"github.com/olekukonko/tablewriter"
	"github.com/pkg/errors"
)

const OutputName = "report"

type ReportOutput struct {
}

func (r ReportOutput) Write(id string, bundles map[string]*data.DiagnosticBundle, conf config.Configuration) (data.FrameErrors, error) {
	conf, err := conf.ValidateConfig(r.Configuration())
	if err != nil {
		return data.FrameErrors{}, err
	}
	format, err := config.ReadStringOptionsValue(conf, "format")
	if err != nil {
		return data.FrameErrors{}, err
	}
	nonInteractive, err := config.ReadBoolValue(conf, "continue")
	if err != nil {
		return data.FrameErrors{}, err
	}
	maxRows, err := config.ReadIntValue(conf, "row_limit")
	if err != nil {
		return data.FrameErrors{}, err
	}
	maxColumns, err := config.ReadIntValue(conf, "column_limit")
	if err != nil {
		return data.FrameErrors{}, err
	}
	frameErrors := data.FrameErrors{}
	for name := range bundles {
		frameError := printDiagnosticBundle(name, bundles[name], format, !nonInteractive, int(maxRows), int(maxColumns))
		frameErrors.Errors = append(frameErrors.Errors, frameError.Errors...)
	}
	return frameErrors, nil
}

func printDiagnosticBundle(name string, diag *data.DiagnosticBundle, format string, interactive bool, maxRows, maxColumns int) data.FrameErrors {
	frameErrors := data.FrameErrors{}
	for frameId, frame := range diag.Frames {
		printFrameHeader(fmt.Sprintf("%s.%s", name, frameId))
		err := printFrame(frame, format, maxRows, maxColumns)
		if err != nil {
			frameErrors.Errors = append(frameErrors.Errors, err)
		}
		if interactive {
			err := waitForEnter()
			if err != nil {
				frameErrors.Errors = append(frameErrors.Errors, err)
			}
		}
	}
	return frameErrors
}

func waitForEnter() error {
	fmt.Println("Press the Enter key to view the next frame report")
	for {
		consoleReader := bufio.NewReaderSize(os.Stdin, 1)
		input, err := consoleReader.ReadByte()
		if err != nil {
			return errors.New("Unable to read user input")
		}
		if input == 3 {
			// Ctrl+C
			fmt.Println("Exiting...")
			os.Exit(0)
		}
		if input == 10 {
			return nil
		}
	}
}

func printFrame(frame data.Frame, format string, maxRows, maxColumns int) error {
	switch f := frame.(type) {
	case data.DatabaseFrame:
		return printDatabaseFrame(f, format, maxRows, maxColumns)
	case data.ConfigFileFrame:
		return printConfigFrame(f, format)
	case data.DirectoryFileFrame:
		return printDirectoryFileFrame(f, format, maxRows)
	case data.HierarchicalFrame:
		return printHierarchicalFrame(f, format, maxRows, maxColumns)
	default:
		// for now our data frame writer supports all frames
		return printDatabaseFrame(f, format, maxRows, maxColumns)
	}
}

func createTable(format string) *tablewriter.Table {
	table := tablewriter.NewWriter(os.Stdout)
	if format == "markdown" {
		table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
		table.SetCenterSeparator("|")
	}
	return table
}

func printFrameHeader(title string) {
	titleTable := tablewriter.NewWriter(os.Stdout)
	titleTable.SetHeader([]string{title})
	titleTable.SetAutoWrapText(false)
	titleTable.SetAutoFormatHeaders(true)
	titleTable.SetHeaderAlignment(tablewriter.ALIGN_CENTER)
	titleTable.SetRowSeparator("\n")
	titleTable.SetHeaderLine(false)
	titleTable.SetBorder(false)
	titleTable.SetTablePadding("\t") // pad with tabs
	titleTable.SetNoWhiteSpace(true)
	titleTable.Render()
}

func printHierarchicalFrame(frame data.HierarchicalFrame, format string, maxRows, maxColumns int) error {
	err := printDatabaseFrame(frame, format, maxRows, maxColumns)
	if err != nil {
		return err
	}
	for _, subFrame := range frame.SubFrames {
		err = printHierarchicalFrame(subFrame, format, maxRows, maxColumns)
		if err != nil {
			return err
		}
	}
	return nil
}

func printDatabaseFrame(frame data.Frame, format string, maxRows, maxColumns int) error {
	table := createTable(format)
	table.SetAutoWrapText(false)
	columns := len(frame.Columns())
	if maxColumns > 0 && maxColumns < columns {
		columns = maxColumns
	}
	table.SetHeader(frame.Columns()[:columns])
	r := 0
	trunColumns := 0
	for {
		values, ok, err := frame.Next()
		if !ok || r == maxRows {
			table.Render()
			if trunColumns > 0 {
				warning(fmt.Sprintf("Truncated %d columns, more available...", trunColumns))
			}
			if r == maxRows {
				warning("Truncated rows, more available...")
			}
			return err
		}
		if err != nil {
			return err
		}
		columns := len(values)
		// -1 means unlimited
		if maxColumns > 0 && maxColumns < columns {
			trunColumns = columns - maxColumns
			columns = maxColumns
		}
		row := make([]string, columns)
		for i, value := range values {
			if i == columns {
				break
			}
			row[i] = fmt.Sprintf("%v", value)
		}
		table.Append(row)
		r++
	}
}

// currently we dump the whole config - dumping only parts of it isn't useful
func printConfigFrame(frame data.Frame, format string) error {
	for {
		values, ok, err := frame.Next()
		if !ok {
			return err
		}
		if err != nil {
			return err
		}
		configFile := values[0].(data.File)
		dat, err := os.ReadFile(configFile.FilePath())
		if err != nil {
			return err
		}
		// create a table per row - as each will be a file
		table := createTable(format)
		table.SetAutoWrapText(false)
		table.SetAutoFormatHeaders(false)
		table.ClearRows()
		table.SetHeader([]string{configFile.FilePath()})
		table.Append([]string{string(dat)})
		table.Render()
	}
}

func printDirectoryFileFrame(frame data.Frame, format string, maxRows int) error {
	for {
		values, ok, err := frame.Next()
		if !ok {
			return err
		}
		if err != nil {
			return err
		}
		path := values[0].(data.SimpleFile)
		file, err := os.Open(path.FilePath())
		if err != nil {
			// failure on one file causes rest to be ignored in frame...we could improve this
			return errors.Wrapf(err, "Unable to read file %s", path.FilePath())
		}
		scanner := bufio.NewScanner(file)
		i := 0
		// create a table per row - as each will be a file
		table := createTable(format)
		table.SetAutoWrapText(false)
		table.SetAutoFormatHeaders(false)
		table.ClearRows()
		table.SetHeader([]string{path.FilePath()})
		for scanner.Scan() {
			if i == maxRows {
				fmt.Println()
				table.Render()
				warning("Truncated lines, more available...")
				fmt.Print("\n")
				break
			}
			table.Append([]string{scanner.Text()})
			i++
		}
	}
}

// prints a warning
func warning(s string) {
	fmt.Printf("\x1b[%dm%v\x1b[0m%s\n", 33, "WARNING: ", s)
}

func (r ReportOutput) Configuration() config.Configuration {
	return config.Configuration{
		Params: []config.ConfigParam{
			config.StringOptions{
				Value:   "default",
				Options: []string{"default", "markdown"},
				Param:   config.NewParam("format", "Format of tables. Default is terminal friendly.", false),
			},
			config.BoolParam{
				Value: false,
				Param: config.NewParam("continue", "Print report with no interaction", false),
			},
			config.IntParam{
				Value: 10,
				Param: config.NewParam("row_limit", "Max Rows to print per frame.", false),
			},
			config.IntParam{
				Value: 8,
				Param: config.NewParam("column_limit", "Max Columns to print per frame. Negative is unlimited.", false),
			},
		},
	}
}

func (r ReportOutput) Description() string {
	return "Writes out the diagnostic bundle to the terminal as a simple report."
}

// here we register the output for use
func init() {
	outputs.Register(OutputName, func() (outputs.Output, error) {
		return ReportOutput{}, nil
	})
}
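Because Write validates its configuration against the defaults above, a caller only needs to override the parameters it cares about. A minimal sketch of a non-interactive invocation follows; this is not code from the PR, and the run id and empty bundle are placeholders.

```go
package main

import (
	"log"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/terminal"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
)

func main() {
	// override only "continue"; ValidateConfig inside Write fills in the
	// remaining defaults (format, row_limit, column_limit)
	conf := config.Configuration{
		Params: []config.ConfigParam{
			config.BoolParam{
				Value: true,
				Param: config.NewParam("continue", "Print report with no interaction", false),
			},
		},
	}
	bundles := map[string]*data.DiagnosticBundle{
		"example": {Frames: map[string]data.Frame{}},
	}
	if _, err := (terminal.ReportOutput{}).Write("example-run", bundles, conf); err != nil {
		log.Fatal(err)
	}
}
```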
@@ -1,129 +0,0 @@

package config

import (
	"fmt"
	"strings"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
)

type ConfigParam interface {
	Name() string
	Required() bool
	Description() string
	validate(defaultConfig ConfigParam) error
}

type Configuration struct {
	Params []ConfigParam
}

type Param struct {
	name        string
	description string
	required    bool
}

func NewParam(name string, description string, required bool) Param {
	return Param{
		name:        name,
		description: description,
		required:    required,
	}
}

func (bp Param) Name() string {
	return bp.name
}

func (bp Param) Required() bool {
	return bp.required
}

func (bp Param) Description() string {
	return bp.description
}

func (bp Param) validate(defaultConfig ConfigParam) error {
	return nil
}

func (c Configuration) GetConfigParam(paramName string) (ConfigParam, error) {
	for _, param := range c.Params {
		if param.Name() == paramName {
			return param, nil
		}
	}
	return nil, fmt.Errorf("%s does not exist", paramName)
}

// ValidateConfig finds the intersection of a config c and a default config. Requires all possible params to be in default.
func (c Configuration) ValidateConfig(defaultConfig Configuration) (Configuration, error) {
	var finalParams []ConfigParam
	for _, defaultParam := range defaultConfig.Params {
		setParam, err := c.GetConfigParam(defaultParam.Name())
		if err == nil {
			// check the set value is valid
			if err := setParam.validate(defaultParam); err != nil {
				return Configuration{}, fmt.Errorf("parameter %s is invalid - %s", defaultParam.Name(), err.Error())
			}
			finalParams = append(finalParams, setParam)
		} else if defaultParam.Required() {
			return Configuration{}, fmt.Errorf("missing required parameter %s - %s", defaultParam.Name(), err.Error())
		} else {
			finalParams = append(finalParams, defaultParam)
		}
	}
	return Configuration{
		Params: finalParams,
	}, nil
}

type StringParam struct {
	Param
	Value      string
	AllowEmpty bool
}

func (sp StringParam) validate(defaultConfig ConfigParam) error {
	dsp := defaultConfig.(StringParam)
	if !dsp.AllowEmpty && strings.TrimSpace(sp.Value) == "" {
		return fmt.Errorf("%s cannot be empty", sp.Name())
	}
	// if the parameter is not required it doesn't matter
	return nil
}

type StringListParam struct {
	Param
	Values []string
}

type StringOptions struct {
	Param
	Options    []string
	Value      string
	AllowEmpty bool
}

func (so StringOptions) validate(defaultConfig ConfigParam) error {
	dso := defaultConfig.(StringOptions)
	if !dso.AllowEmpty && strings.TrimSpace(so.Value) == "" {
		return fmt.Errorf("%s cannot be empty", so.Name())
	}
	if !utils.Contains(dso.Options, so.Value) {
		return fmt.Errorf("%s is not a valid value for %s - %v", so.Value, so.Name(), so.Options)
	}
	// if the parameter is not required it doesn't matter
	return nil
}

type IntParam struct {
	Param
	Value int64
}

type BoolParam struct {
	Param
	Value bool
}
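The tests that follow exercise ValidateConfig in detail; as a compact summary, the sketch below shows the intended merge behaviour: user-supplied params override the defaults, unset optional params fall back, and validation runs against the default's constraints. The parameter names "target" and "limit" are illustrative only.

```go
package main

import (
	"fmt"
	"log"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
)

func main() {
	// a component's defaults
	defaults := config.Configuration{
		Params: []config.ConfigParam{
			config.StringParam{Value: "./out", Param: config.NewParam("target", "Output directory", false)},
			config.IntParam{Value: 100, Param: config.NewParam("limit", "Row limit", false)},
		},
	}
	// the user overrides "target" only
	userConf := config.Configuration{
		Params: []config.ConfigParam{
			config.StringParam{Value: "/tmp/bundle", Param: config.NewParam("target", "Output directory", false)},
		},
	}
	merged, err := userConf.ValidateConfig(defaults)
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range merged.Params {
		fmt.Println(p.Name()) // target (overridden), limit (default)
	}
}
```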
@@ -1,182 +0,0 @@

package config_test

import (
	"testing"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/stretchr/testify/require"
)

var conf = config.Configuration{
	Params: []config.ConfigParam{
		config.StringListParam{
			Values: []string{"some", "values"},
			Param:  config.NewParam("paramA", "", false),
		},
		config.StringParam{
			Value: "random",
			Param: config.NewParam("paramB", "", true),
		},
		config.StringParam{
			Value:      "",
			AllowEmpty: true,
			Param:      config.NewParam("paramC", "", false),
		},
		config.StringOptions{
			Value:      "random",
			Options:    []string{"random", "very_random", "very_very_random"},
			Param:      config.NewParam("paramD", "", false),
			AllowEmpty: true,
		},
	},
}

func TestGetConfigParam(t *testing.T) {
	t.Run("can get config param by name", func(t *testing.T) {
		paramA, err := conf.GetConfigParam("paramA")
		require.Nil(t, err)
		require.NotNil(t, paramA)
		require.IsType(t, config.StringListParam{}, paramA)
		stringListParam, ok := paramA.(config.StringListParam)
		require.True(t, ok)
		require.False(t, stringListParam.Required())
		require.Equal(t, stringListParam.Name(), "paramA")
		require.ElementsMatch(t, stringListParam.Values, []string{"some", "values"})
	})

	t.Run("throws error on missing element", func(t *testing.T) {
		paramZ, err := conf.GetConfigParam("paramZ")
		require.Nil(t, paramZ)
		require.NotNil(t, err)
		require.Equal(t, err.Error(), "paramZ does not exist")
	})
}

func TestValidateConfig(t *testing.T) {
	t.Run("validate adds the default and allows override", func(t *testing.T) {
		customConf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Value: "custom",
					Param: config.NewParam("paramB", "", true),
				},
			},
		}
		newConf, err := customConf.ValidateConfig(conf)
		require.Nil(t, err)
		require.NotNil(t, newConf)
		require.Len(t, newConf.Params, 4)
		// check first param
		require.IsType(t, config.StringListParam{}, newConf.Params[0])
		stringListParam, ok := newConf.Params[0].(config.StringListParam)
		require.True(t, ok)
		require.False(t, stringListParam.Required())
		require.Equal(t, stringListParam.Name(), "paramA")
		require.ElementsMatch(t, stringListParam.Values, []string{"some", "values"})
		// check second param
		require.IsType(t, config.StringParam{}, newConf.Params[1])
		stringParam, ok := newConf.Params[1].(config.StringParam)
		require.True(t, ok)
		require.True(t, stringParam.Required())
		require.Equal(t, "paramB", stringParam.Name())
		require.Equal(t, "custom", stringParam.Value)
	})

	t.Run("validate errors if missing param", func(t *testing.T) {
		// missing required paramB
		customConf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringListParam{
					Values: []string{"some", "values"},
					Param:  config.NewParam("paramA", "", false),
				},
			},
		}
		newConf, err := customConf.ValidateConfig(conf)
		require.Nil(t, newConf.Params)
		require.NotNil(t, err)
		require.Equal(t, "missing required parameter paramB - paramB does not exist", err.Error())
	})

	t.Run("validate errors if invalid string value", func(t *testing.T) {
		// paramB is required but set to an empty value
		customConf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Value: "",
					Param: config.NewParam("paramB", "", true),
				},
			},
		}
		newConf, err := customConf.ValidateConfig(conf)
		require.Nil(t, newConf.Params)
		require.NotNil(t, err)
		require.Equal(t, "parameter paramB is invalid - paramB cannot be empty", err.Error())
	})

	t.Run("allow empty string value if specified", func(t *testing.T) {
		// paramC allows empty values
		customConf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Value: "",
					Param: config.NewParam("paramC", "", true),
				},
				config.StringParam{
					Value: "custom",
					Param: config.NewParam("paramB", "", true),
				},
			},
		}
		newConf, err := customConf.ValidateConfig(conf)
		require.NotNil(t, newConf.Params)
		require.Nil(t, err)
	})

	t.Run("validate errors if invalid string options value", func(t *testing.T) {
		// paramD value is not in the allowed options
		customConf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Value: "not_random",
					Param: config.NewParam("paramB", "", true),
				},
				config.StringOptions{
					Value: "custom",
					// this isn't ideal - we need to ensure options are set for this to validate correctly
					Options: []string{"random", "very_random", "very_very_random"},
					Param:   config.NewParam("paramD", "", true),
				},
			},
		}
		newConf, err := customConf.ValidateConfig(conf)
		require.Nil(t, newConf.Params)
		require.NotNil(t, err)
		require.Equal(t, "parameter paramD is invalid - custom is not a valid value for paramD - [random very_random very_very_random]", err.Error())
	})

	t.Run("allow empty string value for StringOptions if specified", func(t *testing.T) {
		// paramD allows empty values
		customConf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringParam{
					Value: "custom",
					Param: config.NewParam("paramB", "", true),
				},
				config.StringOptions{
					Param: config.Param{},
					// this isn't ideal - we need to ensure options are set for this to validate correctly
					Options: []string{"random", "very_random", "very_very_random"},
					Value:   "",
				},
			},
		}
		newConf, err := customConf.ValidateConfig(conf)
		require.NotNil(t, newConf.Params)
		require.Nil(t, err)
	})

	// TODO: Do we need to test if parameters of the same name but wrong type are passed?
}
@@ -1,74 +0,0 @@

package config

import (
	"fmt"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
)

func ReadStringListValues(conf Configuration, paramName string) ([]string, error) {
	param, err := conf.GetConfigParam(paramName)
	if err != nil {
		return nil, err
	}
	value, ok := param.(StringListParam)
	if !ok {
		return nil, fmt.Errorf("%s must be a list of strings", paramName)
	}
	return value.Values, nil
}

func ReadStringValue(conf Configuration, paramName string) (string, error) {
	param, err := conf.GetConfigParam(paramName)
	if err != nil {
		return "", err
	}
	value, ok := param.(StringParam)
	if !ok {
		return "", fmt.Errorf("%s must be a string", paramName)
	}
	return value.Value, nil
}

func ReadIntValue(conf Configuration, paramName string) (int64, error) {
	param, err := conf.GetConfigParam(paramName)
	if err != nil {
		return 0, err
	}
	value, ok := param.(IntParam)
	if !ok {
		return 0, fmt.Errorf("%s must be an integer", paramName)
	}
	return value.Value, nil
}

func ReadBoolValue(conf Configuration, paramName string) (bool, error) {
	param, err := conf.GetConfigParam(paramName)
	if err != nil {
		return false, err
	}
	value, ok := param.(BoolParam)
	if !ok {
		return false, fmt.Errorf("%s must be a boolean", paramName)
	}
	return value.Value, nil
}

func ReadStringOptionsValue(conf Configuration, paramName string) (string, error) {
	param, err := conf.GetConfigParam(paramName)
	if err != nil {
		return "", err
	}
	value, ok := param.(StringOptions)
	if !ok {
		return "", fmt.Errorf("%s must be a string option", paramName)
	}
	if !utils.Contains(value.Options, value.Value) {
		return "", fmt.Errorf("%s is not a valid option in %v for the parameter %s", value.Value, value.Options, paramName)
	}
	return value.Value, nil
}
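A compact sketch of these typed readers in use follows; it is not part of the diff, and the parameter names simply mirror those used by the outputs elsewhere in this PR ("format", "row_limit").

```go
package main

import (
	"fmt"
	"log"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
)

func main() {
	conf := config.Configuration{
		Params: []config.ConfigParam{
			config.StringOptions{
				Value:   "csv",
				Options: []string{"csv"},
				Param:   config.NewParam("format", "Format of exported files", false),
			},
			config.IntParam{
				Value: 10,
				Param: config.NewParam("row_limit", "Max rows", false),
			},
		},
	}
	// each reader returns a typed value or an error if the param is
	// missing or of the wrong type
	format, err := config.ReadStringOptionsValue(conf, "format")
	if err != nil {
		log.Fatal(err)
	}
	rowLimit, err := config.ReadIntValue(conf, "row_limit")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(format, rowLimit) // csv 10
}
```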
@@ -1,142 +0,0 @@

package config_test

import (
	"testing"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/stretchr/testify/require"
)

func TestReadStringListValues(t *testing.T) {
	t.Run("can find a string list param", func(t *testing.T) {
		conf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringListParam{
					// nil means include everything
					Values: nil,
					Param:  config.NewParam("include_tables", "Specify list of tables to collect", false),
				},
				config.StringListParam{
					Values: []string{"licenses", "settings"},
					Param:  config.NewParam("exclude_tables", "Specify list of tables not to collect", false),
				},
			},
		}
		excludeTables, err := config.ReadStringListValues(conf, "exclude_tables")
		require.Nil(t, err)
		require.Equal(t, []string{"licenses", "settings"}, excludeTables)
	})
}

func TestReadStringValue(t *testing.T) {
	t.Run("can find a string param", func(t *testing.T) {
		conf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringListParam{
					// nil means include everything
					Values: nil,
					Param:  config.NewParam("include_tables", "Specify list of tables to collect", false),
				},
				config.StringParam{
					Value: "/tmp/dump",
					Param: config.NewParam("directory", "Specify a directory", false),
				},
			},
		}
		directory, err := config.ReadStringValue(conf, "directory")
		require.Nil(t, err)
		require.Equal(t, "/tmp/dump", directory)
	})
}

func TestReadIntValue(t *testing.T) {
	t.Run("can find an integer param", func(t *testing.T) {
		conf := config.Configuration{
			Params: []config.ConfigParam{
				config.IntParam{
					Value: 10000,
					Param: config.NewParam("row_limit", "Max Rows to collect", false),
				},
				config.StringListParam{
					// nil means include everything
					Values: nil,
					Param:  config.NewParam("include_tables", "Specify list of tables to collect", false),
				},
				config.StringParam{
					Value: "/tmp/dump",
					Param: config.NewParam("directory", "Specify a directory", false),
				},
			},
		}
		rowLimit, err := config.ReadIntValue(conf, "row_limit")
		require.Nil(t, err)
		require.Equal(t, int64(10000), rowLimit)
	})
}

func TestReadBoolValue(t *testing.T) {
	t.Run("can find a boolean param", func(t *testing.T) {
		conf := config.Configuration{
			Params: []config.ConfigParam{
				config.BoolParam{
					Value: true,
					Param: config.NewParam("compress", "Compress data", false),
				},
				config.StringListParam{
					// nil means include everything
					Values: nil,
					Param:  config.NewParam("include_tables", "Specify list of tables to collect", false),
				},
				config.StringParam{
					Value: "/tmp/dump",
					Param: config.NewParam("directory", "Specify a directory", false),
				},
			},
		}

		compress, err := config.ReadBoolValue(conf, "compress")
		require.Nil(t, err)
		require.True(t, compress)
	})
}

func TestReadStringOptionsValue(t *testing.T) {
	t.Run("can find a string value in a list of options", func(t *testing.T) {
		conf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringOptions{
					Param:      config.NewParam("format", "List of formats", false),
					Options:    []string{"csv", "tsv", "binary", "json", "ndjson"},
					Value:      "csv",
					AllowEmpty: false,
				},
			},
		}
		format, err := config.ReadStringOptionsValue(conf, "format")
		require.Nil(t, err)
		require.Equal(t, "csv", format)
	})

	t.Run("errors on invalid value", func(t *testing.T) {
		conf := config.Configuration{
			Params: []config.ConfigParam{
				config.StringOptions{
					Param:      config.NewParam("format", "List of formats", false),
					Options:    []string{"csv", "tsv", "binary", "json", "ndjson"},
					Value:      "random",
					AllowEmpty: false,
				},
			},
		}
		format, err := config.ReadStringOptionsValue(conf, "format")
		require.Equal(t, "random is not a valid option in [csv tsv binary json ndjson] for the parameter format", err.Error())
		require.Equal(t, "", format)
	})
}
@@ -1,27 +0,0 @@

package data

import (
	"strings"
)

// DiagnosticBundle contains the results from a Collector
// each frame can represent a table or collection of data files. By allowing multiple frames, a single DiagnosticBundle
// can potentially contain many related tables
type DiagnosticBundle struct {
	Frames map[string]Frame
	// Errors is a property to be set if the Collector has an error. This can be used to indicate a partial collection
	// and failed frames
	Errors FrameErrors
}

type FrameErrors struct {
	Errors []error
}

func (fe *FrameErrors) Error() string {
	errors := make([]string, len(fe.Errors))
	for i := range errors {
		errors[i] = fe.Errors[i].Error()
	}
	return strings.Join(errors, "\n")
}
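To make the partial-collection convention concrete: a collector can hand back whatever frames it did gather alongside the failures. The sketch below is illustrative only; the error text is made up.

```go
package main

import (
	"fmt"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
	"github.com/pkg/errors"
)

func main() {
	// frames that were collected successfully go in Frames; Errors
	// records what could not be gathered (a hypothetical example)
	bundle := data.DiagnosticBundle{
		Frames: map[string]data.Frame{},
		Errors: data.FrameErrors{Errors: []error{
			errors.New("table system.licenses unavailable"),
		}},
	}
	fmt.Println(bundle.Errors.Error())
}
```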
@@ -1,26 +0,0 @@

package data_test

import (
	"testing"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"
)

func TestBundleError(t *testing.T) {
	t.Run("can get a bundle error", func(t *testing.T) {
		errs := make([]error, 3)
		errs[0] = errors.New("Error 1")
		errs[1] = errors.New("Error 2")
		errs[2] = errors.New("Error 3")
		fErrors := data.FrameErrors{
			Errors: errs,
		}
		require.Equal(t, `Error 1
Error 2
Error 3`, fErrors.Error())
	})
}
@@ -1,88 +0,0 @@

package data

import (
	"database/sql"
	"fmt"
	"reflect"
	"strings"
)

type DatabaseFrame struct {
	name        string
	ColumnNames []string
	rows        *sql.Rows
	columnTypes []*sql.ColumnType
	vars        []interface{}
}

func NewDatabaseFrame(name string, rows *sql.Rows) (DatabaseFrame, error) {
	databaseFrame := DatabaseFrame{}
	columnTypes, err := rows.ColumnTypes()
	if err != nil {
		return DatabaseFrame{}, err
	}
	databaseFrame.columnTypes = columnTypes
	databaseFrame.name = name
	vars := make([]interface{}, len(columnTypes))
	columnNames := make([]string, len(columnTypes))
	for i := range columnTypes {
		value := reflect.Zero(columnTypes[i].ScanType()).Interface()
		vars[i] = &value
		columnNames[i] = columnTypes[i].Name()
	}
	databaseFrame.ColumnNames = columnNames
	databaseFrame.vars = vars
	databaseFrame.rows = rows
	return databaseFrame, nil
}

func (f DatabaseFrame) Next() ([]interface{}, bool, error) {
	values := make([]interface{}, len(f.columnTypes))
	for f.rows.Next() {
		if err := f.rows.Scan(f.vars...); err != nil {
			return nil, false, err
		}
		for i := range f.columnTypes {
			ptr := reflect.ValueOf(f.vars[i])
			values[i] = ptr.Elem().Interface()
		}
		return values, true, nil //nolint
	}
	// TODO: raise issue as this seems to always raise an error
	//err := f.rows.Err()
	f.rows.Close()
	return nil, false, nil
}

func (f DatabaseFrame) Columns() []string {
	return f.ColumnNames
}

func (f DatabaseFrame) Name() string {
	return f.name
}

type Order int

const (
	Asc  Order = 1
	Desc Order = 2
)

type OrderBy struct {
	Column string
	Order  Order
}

func (o OrderBy) String() string {
	if strings.TrimSpace(o.Column) == "" {
		return ""
	}
	switch o.Order {
	case Asc:
		return fmt.Sprintf(" ORDER BY %s ASC", o.Column)
	case Desc:
		return fmt.Sprintf(" ORDER BY %s DESC", o.Column)
	}
	return ""
}
@@ -1,86 +0,0 @@

package data_test

import (
	"database/sql"
	"testing"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
	"github.com/DATA-DOG/go-sqlmock"
	"github.com/stretchr/testify/require"
)

func TestString(t *testing.T) {
	t.Run("can order by asc", func(t *testing.T) {
		orderBy := data.OrderBy{
			Column: "created_at",
			Order:  data.Asc,
		}
		require.Equal(t, " ORDER BY created_at ASC", orderBy.String())
	})

	t.Run("can order by desc", func(t *testing.T) {
		orderBy := data.OrderBy{
			Column: "created_at",
			Order:  data.Desc,
		}
		require.Equal(t, " ORDER BY created_at DESC", orderBy.String())
	})
}

func TestNextDatabaseFrame(t *testing.T) {
	t.Run("can iterate sql rows", func(t *testing.T) {
		rowValues := [][]interface{}{
			{int64(1), "post_1", "hello"},
			{int64(2), "post_2", "world"},
			{int64(3), "post_3", "goodbye"},
			{int64(4), "post_4", "world"},
		}
		mockRows := sqlmock.NewRows([]string{"id", "title", "body"})
		for i := range rowValues {
			mockRows.AddRow(rowValues[i][0], rowValues[i][1], rowValues[i][2])
		}
		rows := mockRowsToSqlRows(mockRows)
		dbFrame, err := data.NewDatabaseFrame("test", rows)
		require.ElementsMatch(t, dbFrame.Columns(), []string{"id", "title", "body"})
		require.Nil(t, err)
		i := 0
		for {
			values, ok, err := dbFrame.Next()
			require.Nil(t, err)
			if !ok {
				break
			}
			require.Len(t, values, 3)
			require.ElementsMatch(t, values, rowValues[i])
			i++
		}
		require.Equal(t, 4, i)
	})

	t.Run("can iterate empty sql rows", func(t *testing.T) {
		mockRows := sqlmock.NewRows([]string{"id", "title", "body"})
		rows := mockRowsToSqlRows(mockRows)
		dbFrame, err := data.NewDatabaseFrame("test", rows)
		require.ElementsMatch(t, dbFrame.Columns(), []string{"id", "title", "body"})
		require.Nil(t, err)
		i := 0
		for {
			_, ok, err := dbFrame.Next()
			require.Nil(t, err)
			if !ok {
				break
			}
			i++
		}
		require.Equal(t, 0, i)
	})
}

func mockRowsToSqlRows(mockRows *sqlmock.Rows) *sql.Rows {
	db, mock, _ := sqlmock.New()
	mock.ExpectQuery("select").WillReturnRows(mockRows)
	rows, _ := db.Query("select")
	return rows
}
@@ -1,8 +0,0 @@

package data

type Field struct {
	// Name of the field
	Name string
	// A list of values that must implement the FieldType interface
	Values []interface{}
}
@ -1,444 +0,0 @@
|
||||
package data
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/xml"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
|
||||
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
|
||||
"github.com/pkg/errors"
|
||||
"gopkg.in/yaml.v3"
|
||||
)
|
||||
|
||||
type File interface {
|
||||
Copy(destPath string, removeSensitive bool) error
|
||||
FilePath() string
|
||||
}
|
||||
|
||||
type SimpleFile struct {
|
||||
Path string
|
||||
}
|
||||
|
||||
// Copy supports removeSensitive for other file types but for a simple file this doesn't do anything
|
||||
func (s SimpleFile) Copy(destPath string, removeSensitive bool) error {
|
||||
// simple copy easiest
|
||||
if err := utils.CopyFile(s.FilePath(), destPath); err != nil {
|
||||
return errors.Wrapf(err, "unable to copy file %s", s.FilePath())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s SimpleFile) FilePath() string {
|
||||
return s.Path
|
||||
}
|
||||
|
||||
func NewFileFrame(name string, filePaths []string) FileFrame {
|
||||
i := 0
|
||||
files := make([]File, len(filePaths))
|
||||
for i, path := range filePaths {
|
||||
files[i] = SimpleFile{
|
||||
Path: path,
|
||||
}
|
||||
}
|
||||
return FileFrame{
|
||||
name: name,
|
||||
i: &i,
|
||||
files: files,
|
||||
}
|
||||
}
|
||||
|
||||
type FileFrame struct {
|
||||
name string
|
||||
i *int
|
||||
files []File
|
||||
}
|
||||
|
||||
func (f FileFrame) Next() ([]interface{}, bool, error) {
|
||||
if len(f.files) == *(f.i) {
|
||||
return nil, false, nil
|
||||
}
|
||||
file := f.files[*f.i]
|
||||
*f.i++
|
||||
value := make([]interface{}, 1)
|
||||
value[0] = file
|
||||
return value, true, nil
|
||||
}
|
||||
|
||||
func (f FileFrame) Columns() []string {
|
||||
return []string{"files"}
|
||||
}
|
||||
|
||||
func (f FileFrame) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// DirectoryFileFrame represents a set of files under a directory
|
||||
type DirectoryFileFrame struct {
|
||||
FileFrame
|
||||
Directory string
|
||||
}
|
||||
|
||||
func NewFileDirectoryFrame(directory string, exts []string) (DirectoryFileFrame, []error) {
|
||||
filePaths, errs := utils.ListFilesInDirectory(directory, exts)
|
||||
files := make([]File, len(filePaths))
|
||||
for i, path := range filePaths {
|
||||
files[i] = SimpleFile{
|
||||
Path: path,
|
||||
}
|
||||
}
|
||||
i := 0
|
||||
return DirectoryFileFrame{
|
||||
Directory: directory,
|
||||
FileFrame: FileFrame{
|
||||
files: files,
|
||||
i: &i,
|
||||
},
|
||||
}, errs
|
||||
}
|
||||
|
||||
func (f DirectoryFileFrame) Next() ([]interface{}, bool, error) {
|
||||
if len(f.files) == *(f.i) {
|
||||
return nil, false, nil
|
||||
}
|
||||
file := f.files[*f.i]
|
||||
*f.i++
|
||||
value := make([]interface{}, 1)
|
||||
value[0] = file
|
||||
return value, true, nil
|
||||
}
|
||||
|
||||
func (f DirectoryFileFrame) Columns() []string {
|
||||
return []string{"files"}
|
||||
}
|
||||
|
||||
func (f DirectoryFileFrame) Name() string {
|
||||
return f.Directory
|
||||
}
|
||||
|
||||
type ConfigFile interface {
	File
	FindLogPaths() ([]string, error)
	FindIncludedConfig() (ConfigFile, error)
	IsIncluded() bool
}

type ConfigFileFrame struct {
	i         *int
	Directory string
	files     []ConfigFile
}

func (f ConfigFileFrame) Next() ([]interface{}, bool, error) {
	if len(f.files) == *(f.i) {
		return nil, false, nil
	}
	file := f.files[*f.i]
	*f.i++
	value := make([]interface{}, 1)
	value[0] = file
	return value, true, nil
}

func (f ConfigFileFrame) Name() string {
	return f.Directory
}

func NewConfigFileFrame(directory string) (ConfigFileFrame, []error) {
	files, errs := utils.ListFilesInDirectory(directory, []string{"*.xml", "*.yaml", "*.yml"})
	// we can't predict the length because of include files
	var configs []ConfigFile

	for _, path := range files {
		var configFile ConfigFile
		switch ext := filepath.Ext(path); ext {
		case ".xml":
			configFile = XmlConfigFile{
				Path:     path,
				Included: false,
			}
		case ".yml", ".yaml":
			configFile = YamlConfigFile{
				Path:     path,
				Included: false,
			}
		}
		if configFile != nil {
			configs = append(configs, configFile)
			// add any included configs
			iConf, err := configFile.FindIncludedConfig()
			if err != nil {
				errs = append(errs, err)
			} else if iConf.FilePath() != "" {
				configs = append(configs, iConf)
			}
		}
	}
	i := 0

	return ConfigFileFrame{
		i:         &i,
		Directory: directory,
		files:     configs,
	}, errs
}

func (f ConfigFileFrame) Columns() []string {
	return []string{"config"}
}

func (f ConfigFileFrame) FindLogPaths() (logPaths []string, errors []error) {
	for _, configFile := range f.files {
		paths, err := configFile.FindLogPaths()
		if err != nil {
			errors = append(errors, err)
		} else {
			logPaths = append(logPaths, paths...)
		}
	}
	return logPaths, errors
}

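// Usage sketch: build a frame over a config directory and collect the log
// paths declared across all discovered (and included) config files; the
// directory below is hypothetical:
//
//	frame, errs := NewConfigFileFrame("/etc/clickhouse-server")
//	if len(errs) == 0 {
//		logPaths, pathErrs := frame.FindLogPaths()
//		_ = pathErrs
//		fmt.Println(logPaths)
//	}
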
type XmlConfigFile struct {
	Path     string
	Included bool
}

// these patterns are used to remove sensitive content - any match of a pattern is replaced with its map key
var xmlSensitivePatterns = map[string]*regexp.Regexp{
	"<password>Replaced</password>":                       regexp.MustCompile(`<password>(.*)</password>`),
	"<password_sha256_hex>Replaced</password_sha256_hex>": regexp.MustCompile(`<password_sha256_hex>(.*)</password_sha256_hex>`),
	"<secret_access_key>Replaced</secret_access_key>":     regexp.MustCompile(`<secret_access_key>(.*)</secret_access_key>`),
	"<access_key_id>Replaced</access_key_id>":             regexp.MustCompile(`<access_key_id>(.*)</access_key_id>`),
	"<secret>Replaced</secret>":                           regexp.MustCompile(`<secret>(.*)</secret>`),
}

func (x XmlConfigFile) Copy(destPath string, removeSensitive bool) error {
	if !removeSensitive {
		// a simple copy is easiest
		if err := utils.CopyFile(x.FilePath(), destPath); err != nil {
			return errors.Wrapf(err, "unable to copy file %s", x.FilePath())
		}
		return nil
	}
	return sensitiveFileCopy(x.FilePath(), destPath, xmlSensitivePatterns)
}

func (x XmlConfigFile) FilePath() string {
	return x.Path
}

func (x XmlConfigFile) IsIncluded() bool {
	return x.Included
}

type XmlLoggerConfig struct {
	XMLName  xml.Name `xml:"logger"`
	ErrorLog string   `xml:"errorlog"`
	Log      string   `xml:"log"`
}

type YandexXMLConfig struct {
	XMLName     xml.Name        `xml:"yandex"`
	Clickhouse  XmlLoggerConfig `xml:"logger"`
	IncludeFrom string          `xml:"include_from"`
}

type XmlConfig struct {
	XMLName     xml.Name        `xml:"clickhouse"`
	Clickhouse  XmlLoggerConfig `xml:"logger"`
	IncludeFrom string          `xml:"include_from"`
}

func (x XmlConfigFile) UnmarshallConfig() (XmlConfig, error) {
	inputFile, err := ioutil.ReadFile(x.Path)
	if err != nil {
		return XmlConfig{}, err
	}
	var cConfig XmlConfig
	err = xml.Unmarshal(inputFile, &cConfig)
	if err == nil {
		return XmlConfig{
			Clickhouse:  cConfig.Clickhouse,
			IncludeFrom: cConfig.IncludeFrom,
		}, nil
	}
	// fall back to unmarshalling as a legacy yandex-rooted file
	var yConfig YandexXMLConfig
	err = xml.Unmarshal(inputFile, &yConfig)
	if err != nil {
		return XmlConfig{}, err
	}
	return XmlConfig{
		Clickhouse:  yConfig.Clickhouse,
		IncludeFrom: yConfig.IncludeFrom,
	}, nil
}

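// Both of these documents unmarshal successfully via UnmarshallConfig: the
// first matches XmlConfig directly, while the second is rejected by the
// <clickhouse> root tag and only matches the legacy YandexXMLConfig
// fallback (document contents are illustrative):
//
//	<clickhouse><logger><log>/var/log/clickhouse-server/clickhouse-server.log</log></logger></clickhouse>
//	<yandex><logger><log>/var/log/clickhouse-server/clickhouse-server.log</log></logger></yandex>
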
func (x XmlConfigFile) FindLogPaths() ([]string, error) {
	var paths []string
	config, err := x.UnmarshallConfig()
	if err != nil {
		return nil, err
	}
	if config.Clickhouse.Log != "" {
		paths = append(paths, config.Clickhouse.Log)
	}
	if config.Clickhouse.ErrorLog != "" {
		paths = append(paths, config.Clickhouse.ErrorLog)
	}

	return paths, nil
}

func (x XmlConfigFile) FindIncludedConfig() (ConfigFile, error) {
	if x.Included {
		// can't recurse into an included config
		return XmlConfigFile{}, nil
	}
	config, err := x.UnmarshallConfig()
	if err != nil {
		return XmlConfigFile{}, err
	}
	// resolve the include path - relative paths are resolved against this config file's directory
	if config.IncludeFrom != "" {
		if filepath.IsAbs(config.IncludeFrom) {
			return XmlConfigFile{Path: config.IncludeFrom, Included: true}, nil
		}
		confDir := filepath.Dir(x.FilePath())
		return XmlConfigFile{Path: path.Join(confDir, config.IncludeFrom), Included: true}, nil
	}
	return XmlConfigFile{}, nil
}

type YamlConfigFile struct {
	Path     string
	Included bool
}

// patterns are applied line by line, so $ anchors at the end of each line
var ymlSensitivePatterns = map[string]*regexp.Regexp{
	"password: 'Replaced'":            regexp.MustCompile(`password:\s*.*$`),
	"password_sha256_hex: 'Replaced'": regexp.MustCompile(`password_sha256_hex:\s*.*$`),
	"access_key_id: 'Replaced'":       regexp.MustCompile(`access_key_id:\s*.*$`),
	"secret_access_key: 'Replaced'":   regexp.MustCompile(`secret_access_key:\s*.*$`),
	"secret: 'Replaced'":              regexp.MustCompile(`secret:\s*.*$`),
}

func (y YamlConfigFile) Copy(destPath string, removeSensitive bool) error {
	if !removeSensitive {
		// a simple copy is easiest
		if err := utils.CopyFile(y.FilePath(), destPath); err != nil {
			return errors.Wrapf(err, "unable to copy file %s", y.FilePath())
		}
		return nil
	}
	return sensitiveFileCopy(y.FilePath(), destPath, ymlSensitivePatterns)
}

func (y YamlConfigFile) FilePath() string {
	return y.Path
}

func (y YamlConfigFile) IsIncluded() bool {
	return y.Included
}

type YamlLoggerConfig struct {
	Log      string
	ErrorLog string
}

type YamlConfig struct {
	Logger YamlLoggerConfig
	// named to match the include_from yaml key
	Include_From string
}

func (y YamlConfigFile) FindLogPaths() ([]string, error) {
	var paths []string
	inputFile, err := ioutil.ReadFile(y.Path)
	if err != nil {
		return nil, err
	}
	var config YamlConfig
	err = yaml.Unmarshal(inputFile, &config)
	if err != nil {
		return nil, err
	}
	if config.Logger.Log != "" {
		paths = append(paths, config.Logger.Log)
	}
	if config.Logger.ErrorLog != "" {
		paths = append(paths, config.Logger.ErrorLog)
	}
	return paths, nil
}

func (y YamlConfigFile) FindIncludedConfig() (ConfigFile, error) {
	if y.Included {
		// can't recurse into an included config
		return YamlConfigFile{}, nil
	}
	inputFile, err := ioutil.ReadFile(y.Path)
	if err != nil {
		return YamlConfigFile{}, err
	}
	var config YamlConfig
	err = yaml.Unmarshal(inputFile, &config)
	if err != nil {
		return YamlConfigFile{}, err
	}
	// resolve the include path - relative paths are resolved against this config file's directory
	if config.Include_From != "" {
		if filepath.IsAbs(config.Include_From) {
			return YamlConfigFile{Path: config.Include_From, Included: true}, nil
		}
		confDir := filepath.Dir(y.FilePath())
		return YamlConfigFile{Path: path.Join(confDir, config.Include_From), Included: true}, nil
	}
	return YamlConfigFile{}, nil
}

func sensitiveFileCopy(sourcePath string, destPath string, patterns map[string]*regexp.Regexp) error {
	destDir := filepath.Dir(destPath)
	if err := os.MkdirAll(destDir, os.ModePerm); err != nil {
		return errors.Wrapf(err, "unable to create directory %s", destDir)
	}
	// currently, we don't unmarshal into a struct - we want to preserve structure and comments.
	// That could possibly be handled, but for simplicity we parse line by line for now
	inputFile, err := os.Open(sourcePath)
	if err != nil {
		return err
	}
	defer inputFile.Close()
	outputFile, err := os.Create(destPath)
	if err != nil {
		return err
	}
	defer outputFile.Close()
	writer := bufio.NewWriter(outputFile)
	scanner := bufio.NewScanner(inputFile)

	for scanner.Scan() {
		line := scanner.Text()
		for repl, pattern := range patterns {
			line = pattern.ReplaceAllString(line, repl)
		}
		_, err = writer.WriteString(line + "\n")
		if err != nil {
			return err
		}
	}
	return writer.Flush()
}

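Taken together, the redaction above is just a per-line regexp substitution keyed by its replacement text. A minimal standalone sketch of that mechanism, reusing one pattern from xmlSensitivePatterns (the input line is illustrative):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// map key is the replacement text, value is the pattern it replaces,
	// mirroring the sensitive-pattern maps above
	patterns := map[string]*regexp.Regexp{
		"<password>Replaced</password>": regexp.MustCompile(`<password>(.*)</password>`),
	}
	line := "    <password>not-a-real-secret</password>"
	for repl, pattern := range patterns {
		line = pattern.ReplaceAllString(line, repl)
	}
	fmt.Println(line) // prints "    <password>Replaced</password>"
}
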
@ -1,263 +0,0 @@
package data_test

import (
	"fmt"
	"io/ioutil"
	"os"
	"path"
	"path/filepath"
	"strings"
	"testing"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
	"github.com/stretchr/testify/require"
)

func TestNextFileDirectoryFrame(t *testing.T) {
	t.Run("can iterate file frame", func(t *testing.T) {
		// create 5 temporary files, each in its own subdirectory
		tempDir := t.TempDir()
		files := make([]string, 5)
		for i := 0; i < 5; i++ {
			fileDir := path.Join(tempDir, fmt.Sprintf("%d", i))
			err := os.MkdirAll(fileDir, os.ModePerm)
			require.Nil(t, err)
			filePath := path.Join(fileDir, fmt.Sprintf("random-%d.txt", i))
			files[i] = filePath
			_, err = os.Create(filePath)
			require.Nil(t, err)
		}
		fileFrame, errs := data.NewFileDirectoryFrame(tempDir, []string{"*.txt"})
		require.Empty(t, errs)
		i := 0
		for {
			values, ok, err := fileFrame.Next()
			require.Nil(t, err)
			if !ok {
				break
			}
			require.Len(t, values, 1)
			require.Equal(t, files[i], values[0].(data.SimpleFile).Path)
			i++
		}
		require.Equal(t, 5, i)
	})

	t.Run("can iterate file frame when empty", func(t *testing.T) {
		// an empty directory should yield no rows
		tempDir := t.TempDir()
		fileFrame, errs := data.NewFileDirectoryFrame(tempDir, []string{"*"})
		require.Empty(t, errs)
		i := 0
		for {
			_, ok, err := fileFrame.Next()
			require.Nil(t, err)
			if !ok {
				break
			}
			i++
		}
		require.Equal(t, 0, i)
	})
}

func TestNewConfigFileFrame(t *testing.T) {
	t.Run("can iterate config file frame", func(t *testing.T) {
		cwd, err := os.Getwd()
		require.Nil(t, err)

		configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "xml"))
		require.Empty(t, errs)
		i := 0
		for {
			values, ok, err := configFrame.Next()
			require.Nil(t, err)
			if !ok {
				break
			}
			require.Len(t, values, 1)
			filePath := values[0].(data.XmlConfigFile).FilePath()
			require.True(t, strings.Contains(filePath, ".xml"))
			i++
		}
		// 5 not 3 due to the included configs
		require.Equal(t, 5, i)
	})

	t.Run("can iterate config file frame when empty", func(t *testing.T) {
		// an empty directory should yield no configs
		tempDir := t.TempDir()
		configFrame, errs := data.NewConfigFileFrame(tempDir)
		require.Empty(t, errs)
		i := 0
		for {
			_, ok, err := configFrame.Next()
			require.Nil(t, err)
			if !ok {
				break
			}
			i++
		}
		require.Equal(t, 0, i)
	})
}

func TestConfigFileFrameCopy(t *testing.T) {
	t.Run("can copy non-sensitive xml config files", func(t *testing.T) {
		tmpDir := t.TempDir()
		cwd, err := os.Getwd()
		require.Nil(t, err)
		configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "xml"))
		require.Empty(t, errs)
		for {
			values, ok, err := configFrame.Next()
			require.Nil(t, err)
			if !ok {
				break
			}
			configFile := values[0].(data.XmlConfigFile)
			newPath := path.Join(tmpDir, filepath.Base(configFile.FilePath()))
			err = configFile.Copy(newPath, false)
			require.Nil(t, err)
			require.FileExists(t, newPath)
			sourceInfo, err := os.Stat(configFile.FilePath())
			require.Nil(t, err)
			destInfo, err := os.Stat(newPath)
			require.Nil(t, err)
			// a plain copy must preserve the file size exactly
			require.Equal(t, sourceInfo.Size(), destInfo.Size())
		}
	})

t.Run("can copy sensitive xml config files", func(t *testing.T) {
|
||||
tmrDir := t.TempDir()
|
||||
cwd, err := os.Getwd()
|
||||
require.Nil(t, err)
|
||||
configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "xml"))
|
||||
require.Empty(t, errs)
|
||||
i := 0
|
||||
var checkedFiles []string
|
||||
for {
|
||||
values, ok, err := configFrame.Next()
|
||||
require.Nil(t, err)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
require.Nil(t, err)
|
||||
require.True(t, ok)
|
||||
configFile := values[0].(data.XmlConfigFile)
|
||||
fileName := filepath.Base(configFile.FilePath())
|
||||
newPath := path.Join(tmrDir, fileName)
|
||||
err = configFile.Copy(newPath, true)
|
||||
require.FileExists(t, newPath)
|
||||
require.Nil(t, err)
|
||||
bytes, err := ioutil.ReadFile(newPath)
|
||||
require.Nil(t, err)
|
||||
s := string(bytes)
|
||||
checkedFiles = append(checkedFiles, fileName)
|
||||
if fileName == "users.xml" || fileName == "default-password.xml" || fileName == "user-include.xml" {
|
||||
require.True(t, strings.Contains(s, "<password>Replaced</password>") ||
|
||||
strings.Contains(s, "<password_sha256_hex>Replaced</password_sha256_hex>"))
|
||||
require.NotContains(t, s, "<password>REPLACE_ME</password>")
|
||||
require.NotContains(t, s, "<password_sha256_hex>REPLACE_ME</password_sha256_hex>")
|
||||
} else if fileName == "config.xml" {
|
||||
require.True(t, strings.Contains(s, "<access_key_id>Replaced</access_key_id>"))
|
||||
require.True(t, strings.Contains(s, "<secret_access_key>Replaced</secret_access_key>"))
|
||||
require.True(t, strings.Contains(s, "<secret>Replaced</secret>"))
|
||||
require.NotContains(t, s, "<access_key_id>REPLACE_ME</access_key_id>")
|
||||
require.NotContains(t, s, "<secret_access_key>REPLACE_ME</secret_access_key>")
|
||||
require.NotContains(t, s, "<secret>REPLACE_ME</secret>")
|
||||
}
|
||||
i++
|
||||
}
|
||||
require.ElementsMatch(t, []string{"users.xml", "default-password.xml", "user-include.xml", "config.xml", "server-include.xml"}, checkedFiles)
|
||||
require.Equal(t, 5, i)
|
||||
})
|
||||
|
||||
t.Run("can copy sensitive yaml config files", func(t *testing.T) {
|
||||
tmrDir := t.TempDir()
|
||||
cwd, err := os.Getwd()
|
||||
require.Nil(t, err)
|
||||
configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "yaml"))
|
||||
require.Empty(t, errs)
|
||||
i := 0
|
||||
var checkedFiles []string
|
||||
for {
|
||||
values, ok, err := configFrame.Next()
|
||||
require.Nil(t, err)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
require.Nil(t, err)
|
||||
require.True(t, ok)
|
||||
configFile := values[0].(data.YamlConfigFile)
|
||||
fileName := filepath.Base(configFile.FilePath())
|
||||
newPath := path.Join(tmrDir, fileName)
|
||||
err = configFile.Copy(newPath, true)
|
||||
require.FileExists(t, newPath)
|
||||
require.Nil(t, err)
|
||||
bytes, err := ioutil.ReadFile(newPath)
|
||||
require.Nil(t, err)
|
||||
s := string(bytes)
|
||||
checkedFiles = append(checkedFiles, fileName)
|
||||
if fileName == "users.yaml" || fileName == "default-password.yaml" || fileName == "user-include.yaml" {
|
||||
require.True(t, strings.Contains(s, "password: 'Replaced'") ||
|
||||
strings.Contains(s, "password_sha256_hex: 'Replaced'"))
|
||||
require.NotContains(t, s, "password: 'REPLACE_ME'")
|
||||
require.NotContains(t, s, "password_sha256_hex: \"REPLACE_ME\"")
|
||||
} else if fileName == "config.yaml" {
|
||||
require.True(t, strings.Contains(s, "access_key_id: 'Replaced'"))
|
||||
require.True(t, strings.Contains(s, "secret_access_key: 'Replaced'"))
|
||||
require.True(t, strings.Contains(s, "secret: 'Replaced'"))
|
||||
require.NotContains(t, s, "access_key_id: 'REPLACE_ME'")
|
||||
require.NotContains(t, s, "secret_access_key: REPLACE_ME")
|
||||
require.NotContains(t, s, "secret: REPLACE_ME")
|
||||
}
|
||||
i++
|
||||
}
|
||||
require.ElementsMatch(t, []string{"users.yaml", "default-password.yaml", "user-include.yaml", "config.yaml", "server-include.yaml"}, checkedFiles)
|
||||
require.Equal(t, 5, i)
|
||||
})
|
||||
}
|
||||
|
||||
func TestConfigFileFrameFindLogPaths(t *testing.T) {
	t.Run("can find xml log paths", func(t *testing.T) {
		cwd, err := os.Getwd()
		require.Nil(t, err)
		configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "xml"))
		require.Empty(t, errs)
		paths, errs := configFrame.FindLogPaths()
		require.Empty(t, errs)
		require.ElementsMatch(t, []string{"/var/log/clickhouse-server/clickhouse-server.log",
			"/var/log/clickhouse-server/clickhouse-server.err.log"}, paths)
	})

	t.Run("can handle empty log paths", func(t *testing.T) {
		configFrame, errs := data.NewConfigFileFrame(t.TempDir())
		require.Empty(t, errs)
		paths, errs := configFrame.FindLogPaths()
		require.Empty(t, errs)
		require.Empty(t, paths)
	})

	t.Run("can find yaml log paths", func(t *testing.T) {
		cwd, err := os.Getwd()
		require.Nil(t, err)
		configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "yaml"))
		require.Empty(t, errs)
		paths, errs := configFrame.FindLogPaths()
		require.Empty(t, errs)
		require.ElementsMatch(t, []string{"/var/log/clickhouse-server/clickhouse-server.log",
			"/var/log/clickhouse-server/clickhouse-server.err.log"}, paths)
	})
}

// test the legacy format for ClickHouse xml config files with a yandex root tag
func TestYandexConfigFile(t *testing.T) {
	t.Run("can find xml log paths with yandex root", func(t *testing.T) {
		cwd, err := os.Getwd()
		require.Nil(t, err)
		configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "yandex_xml"))
		require.Empty(t, errs)
		paths, errs := configFrame.FindLogPaths()
		require.Empty(t, errs)
		require.ElementsMatch(t, []string{"/var/log/clickhouse-server/clickhouse-server.log",
			"/var/log/clickhouse-server/clickhouse-server.err.log"}, paths)
	})
}