Remove clickhouse-diagnostics

Alexey Milovidov 2024-03-21 02:52:51 +01:00
parent 03e7228641
commit bfc7c3c89e
89 changed files with 1 addition and 13031 deletions

View File

@@ -61,7 +61,7 @@ RUN arch=${TARGETARCH:-amd64} \
&& rm /tmp/nfpm.deb
ARG GO_VERSION=1.19.10
# We need go for clickhouse-diagnostics
# We needed go for clickhouse-diagnostics (it is not used anymore)
RUN arch=${TARGETARCH:-amd64} \
&& curl -Lo /tmp/go.tgz "https://go.dev/dl/go${GO_VERSION}.linux-${arch}.tar.gz" \
&& tar -xzf /tmp/go.tgz -C /usr/local/ \

View File

@@ -36,22 +36,6 @@ rm -f CMakeCache.txt
if [ -n "$MAKE_DEB" ]; then
rm -rf /build/packages/root
# NOTE: this is for backward compatibility with previous releases,
# that did not include the diagnostics tool (only the script).
if [ -d /build/programs/diagnostics ]; then
if [ -z "$SANITIZER" ]; then
# We need to check if clickhouse-diagnostics is fine and build it
(
cd /build/programs/diagnostics
make test-no-docker
GOARCH="${DEB_ARCH}" CGO_ENABLED=0 make VERSION="$VERSION_STRING" build
mv clickhouse-diagnostics ..
)
else
echo -e "#!/bin/sh\necho 'Not implemented for this type of package'" > /build/programs/clickhouse-diagnostics
chmod +x /build/programs/clickhouse-diagnostics
fi
fi
fi
@@ -121,8 +105,6 @@ if [ -n "$MAKE_DEB" ]; then
# No quotes because I want it to expand to nothing if empty.
# shellcheck disable=SC2086
DESTDIR=/build/packages/root ninja $NINJA_FLAGS programs/install
cp /build/programs/clickhouse-diagnostics /build/packages/root/usr/bin
cp /build/programs/clickhouse-diagnostics /output
bash -x /build/packages/build
fi

View File

@@ -34,8 +34,6 @@ suggests:
contents:
- src: root/usr/bin/clickhouse
dst: /usr/bin/clickhouse
- src: root/usr/bin/clickhouse-diagnostics
dst: /usr/bin/clickhouse-diagnostics
- src: root/usr/bin/clickhouse-extract-from-config
dst: /usr/bin/clickhouse-extract-from-config
- src: root/usr/bin/clickhouse-library-bridge

View File

@@ -1,30 +0,0 @@
# If you prefer the allow list template instead of the deny list, see community template:
# https://github.com/github/gitignore/blob/main/community/Golang/Go.AllowList.gitignore
#
# Binaries for programs and plugins
*.exe
*.exe~
*.dll
*.so
*.dylib
# Test binary, built with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Dependency directories (remove the comment below to include it)
# vendor/
# Go workspace file
go.work
.idea
clickhouse-diagnostics
output
vendor
bin
profile.cov
clickhouse-diagnostics.yml
dist/

View File

@@ -1,49 +0,0 @@
# Contribution
We keep things simple. Execute all commands in this folder.
## Requirements
- docker - tested on version 20.10.12.
- golang >= go1.17.6
## Building
Creates a binary `clickhouse-diagnostics` in the local folder. The build is versioned according to a timestamp. For a versioned release, see [Releasing](#releasing).
```bash
make build
```
## Linting
We use [golangci-lint](https://golangci-lint.run/). It runs in a container, so there is no need to install it.
```bash
make lint-go
```
## Running Tests
```bash
make test
```
For a coverage report:
```bash
make test-coverage
```
## Adding Collectors
TODO
## Adding Outputs
TODO
## Frames
## Parameter Types

View File

@@ -1,65 +0,0 @@
GOCMD=go
GOTEST=$(GOCMD) test
BINARY_NAME=clickhouse-diagnostics
BUILD_DIR=dist
TIMESTAMP := $(shell date +%Y%m%d-%H%M)
COMMIT := $(shell git rev-parse --short HEAD)
MODULE := github.com/ClickHouse/ClickHouse/programs/diagnostics
VERSION := v.dev-${TIMESTAMP}
DEVLDFLAGS = -ldflags "-X ${MODULE}/cmd.Version=${VERSION} -X ${MODULE}/cmd.Commit=${COMMIT}"
# override with env variable to test other versions e.g. 21.11.10.1
CLICKHOUSE_VERSION ?= latest
GREEN := $(shell tput -Txterm setaf 2)
YELLOW := $(shell tput -Txterm setaf 3)
WHITE := $(shell tput -Txterm setaf 7)
CYAN := $(shell tput -Txterm setaf 6)
RESET := $(shell tput -Txterm sgr0)
.PHONY: all test build vendor release lint-go test-coverage dep
all: help
release: ## Release is delegated to goreleaser
$(shell goreleaser release --rm-dist)
## Build:
build: ## Build a binary for local use
# timestamped version
$(GOCMD) build ${DEVLDFLAGS} -o $(BINARY_NAME) ./cmd/clickhouse-diagnostics
clean: ## Remove build-related files
rm ${BINARY_NAME}
rm -f checkstyle-report.xml ./coverage.xml ./profile.cov
vendor: ## Copy all packages needed to support builds and tests into the vendor directory
$(GOCMD) mod vendor
test: ## Run the tests of the project
CLICKHOUSE_VERSION=$(CLICKHOUSE_VERSION) $(GOTEST) -v -race `go list ./... | grep -v ./internal/platform/test`
test-no-docker: ## Don't run tests depending on dockerd
CLICKHOUSE_VERSION=$(CLICKHOUSE_VERSION) $(GOTEST) -v -race -tags no_docker `go list ./... | grep -v ./internal/platform/test`
lint-go: ## Use golangci-lint
docker run --rm -v $(shell pwd):/app -w /app golangci/golangci-lint:latest-alpine golangci-lint run
test-coverage: ## Run the tests of the project and export the coverage
CLICKHOUSE_VERSION=$(CLICKHOUSE_VERSION) $(GOTEST) -cover -covermode=count -coverprofile=profile.cov `go list ./... | grep -v ./internal/platform/test`
$(GOCMD) tool cover -func profile.cov
dep:
$(shell go mod download)
help: ## Show this help.
@echo ''
@echo 'Usage:'
@echo ' ${YELLOW}make${RESET} ${GREEN}<target>${RESET}'
@echo ''
@echo 'Targets:'
@awk 'BEGIN {FS = ":.*?## "} { \
if (/^[a-zA-Z_-]+:.*?##.*$$/) {printf " ${YELLOW}%-20s${GREEN}%s${RESET}\n", $$1, $$2} \
else if (/^## .*$$/) {printf " ${CYAN}%s${RESET}\n", substr($$1,4)} \
}' $(MAKEFILE_LIST)

View File

@@ -1,167 +0,0 @@
# ClickHouse Diagnostics Tool
## Purpose
This tool provides a means of obtaining a diagnostic bundle from a ClickHouse instance. This bundle can be provided to your nearest ClickHouse support provider in order to assist with the diagnosis of issues.
## Design Philosophy
- **No local dependencies** to run. We compile to a platform-independent binary, hence Go.
- **Minimize resource overhead**. Improvements always welcome.
- **Extendable framework**. At its core, the tool provides collectors and outputs. Collectors are independent and responsible for collecting a specific dataset, e.g. system configuration. Outputs produce the diagnostic bundle in a specific format. It should be trivial for contributors to add both. See [Collectors](#collectors) and [Outputs](#outputs) for more details.
- **Convertible output formats**. Outputs produce diagnostic bundles in different formats e.g. archive, simple report etc. Where possible, it should be possible to convert between these formats. For example, an administrator may provide a bundle as an archive to their support provider who in turn wishes to visualise this as a report or even in ClickHouse itself...
- **Something is better than nothing**. Collectors execute independently. We never fail the overall collection because a single collector fails - we prefer to warn the user instead. There are good reasons for a collector failure, e.g. insufficient permissions or missing data.
- **Execute anywhere** - Ideally, this tool is executed on a ClickHouse host. Some collectors, e.g. configuration file collection or system information, rely on this. However, if executed remotely from the cluster, collectors will obtain as much information as possible from the database, warning where collection fails. **We do currently require ClickHouse to be running, connecting over the native port**.
We recommend reading [Permissions, Warnings & Locality](#permissions-warnings--locality).
## Usage
### Collection
The `collect` command allows the collection of a diagnostic bundle. In its simplest form, assuming ClickHouse is running locally on default ports with no password:
```bash
clickhouse-diagnostics collect
```
This will use the default collectors and the simple output. This output produces a timestamped archive bundle in `gz` format in a sub-folder named after the host. The folder name can be controlled via the parameter `--id` or configured directly via the simple output parameter `output.simple.folder` (this allows a specific directory to be specified).
Collectors, Outputs and ClickHouse connection credentials can be specified as shown below:
```bash
clickhouse-diagnostics collect --password random --username default --collector=system_db,system --output=simple --id my_cluster_name
```
This collects the system database and host information from the cluster running locally. The archive bundle will be produced under a folder `my_cluster_name`.
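Because the tool connects over the native port, collection can also be run against a remote instance. A sketch, with a placeholder hostname and credentials:
```bash
clickhouse-diagnostics collect --host ch01.internal --port 9000 --username default --password secret --id remote_cluster
```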
For further details, use the built-in help (the commands below are equivalent):
```bash
clickhouse-diagnostics collect --help
./clickhouse-diagnostics help collect
```
### Help & Finding parameters for collectors & outputs
Collectors and outputs have their own parameters, which are not listed under the help for the `collect` command itself. These can be identified using the `help` command. Specifically:
For more information about a specific collector:
```bash
Use "clickhouse-diagnostics help --collector [collector]"
```
For more information about a specific output:
```bash
Use "clickhouse-diagnostics help --output [output]"
```
### Convert
Coming soon to a cluster near you...
## Collectors
We currently support the following collectors. A `*` indicates this collector is enabled by default:
- `system_db*` - Collects all tables in the system database, except those which have been excluded and up to a specified row limit.
- `system*` - Collects summary OS and hardware statistics for the host.
- `config*` - Collects the ClickHouse configuration from the local filesystem. A best effort is made using process information if ClickHouse is not installed locally. Files referenced via `include_path` are also considered.
- `db_logs*` - Collects the ClickHouse logs directly from the database.
- `logs*` - Collects the ClickHouse logs from the local filesystem.
- `summary*` - Collects summary statistics on the database based on a set of known useful queries. This represents the easiest collector to extend - contributions are welcome to this set which can be found [here](https://github.com/ClickHouse/ClickHouse/blob/master/programs/diagnostics/internal/collectors/clickhouse/queries.json).
- `file` - Collects files based on glob patterns. Does not collect directories. To preview the files which will be collected, try `clickhouse-diagnostics collect --collectors=file --collector.file.file_pattern=<glob path> --output report`
- `command` - Collects the output of a user-specified command. To preview the output, try `clickhouse-diagnostics collect --collectors=command --collector.command.command="<command>" --output report`
- `zookeeper_db` - Collects information about ZooKeeper using the `system.zookeeper` table, recursively iterating the ZooKeeper tree/table. Note: changing the default parameter values can place extremely high load on the database. Use with caution. By default, uses the glob `/clickhouse/{task_queue}/**` to match ZooKeeper paths and iterates to a max depth of 8.
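Collector-specific flags (discoverable via the `help` command described above) can be combined with collector selection. For example, a sketch capping row collection from the system database (the limit here is illustrative):
```bash
clickhouse-diagnostics collect --collectors=system_db --collector.system_db.row_limit=1000
```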
## Outputs
We currently support the following outputs; the `simple` output is the default:
- `simple` - Writes out the diagnostic bundle as files in a structured directory, optionally producing a compressed archive.
- `report` - Writes out the diagnostic bundle to the terminal as a simple report. Supports an ASCII table format or Markdown.
- `clickhouse` - **Under development**. This will allow a bundle to be stored in a cluster allowing visualization in common tooling e.g. Grafana.
## Simple Output
Since the `simple` output is the default, we provide additional details here.
This output produces a timestamped archive by default in `gz` format, under a directory named with either the hostname or the specified collection `--id`. As shown below, a specific folder can also be specified. Compression can also be disabled, leaving just the contents of the folder:
```bash
./clickhouse-diagnostics help --output simple
Writes out the diagnostic bundle as files in a structured directory, optionally producing a compressed archive.
Usage:
--output=simple [flags]
Flags:
--output.simple.directory string Directory in which to create dump. Defaults to the current directory. (default "./")
--output.simple.format string Format of exported files (default "csv")
--output.simple.skip_archive Don't compress output to an archive
```
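For example, to write uncompressed CSV files into a specific directory (the path is illustrative):
```bash
clickhouse-diagnostics collect --output=simple --output.simple.directory=/tmp/diagnostics --output.simple.skip_archive
```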
The archive itself contains a folder for each collector. Each collector can potentially produce many discrete sets of data, known as frames. Each of these typically results in a single file within the collector's folder. For example, each query for the `summary` collector results in a correspondingly named file within the `summary` folder.
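A sketch of the resulting structure, assuming the default collectors (the file names are illustrative):
```
<id>/
    config/
    system_db/
        <table>.csv
    summary/
        <query_name>.csv
```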
## Permissions, Warnings & Locality
Some collectors either require specific permissions for complete collection or should be executed on a ClickHouse host. We aim to collate these requirements below:
- `system_db` - This collector aims to collect all tables in the `system` database. Some tables may fail if certain features are not enabled. Specifically, [allow_introspection_functions](https://clickhouse.com/docs/en/operations/settings/settings/#settings-allow_introspection_functions) is required to collect the `stack_traces` table. [access_management](https://clickhouse.com/docs/en/operations/settings/settings-users/#access_management-user-setting) must be set for the ClickHouse user specified for collection, to permit access to access management tables e.g. `quota_usage`.
- `db_logs` - The ClickHouse user must have access to the tables `query_log`, `query_thread_log` and `text_log`.
- `logs` - The system user under which the tool is executed must have access to the logs directory. It must therefore also be executed on the target ClickHouse server directly for this collector to work. In cases where the logs directory is not in a default location, e.g. `/var/log/clickhouse-server`, we will attempt to establish the location from the ClickHouse configuration. This requires permission to read the configuration files - which in most cases requires specific permissions to be granted to the run user if you are not comfortable executing the tool under sudo or the `clickhouse` user.
- `summary` - This collector executes pre-recorded queries. Some of these read tables concerning access management, thus requiring the ClickHouse user to have the [access_management](https://clickhouse.com/docs/en/operations/settings/settings-users/#access_management-user-setting) permission.
- `config` - This collector reads and copies the local configuration files. It thus requires permissions to read the configuration files - which in most cases requires specific permissions to be granted to the run user if you are not comfortable executing the tool under sudo or the `clickhouse` user.
**If a collector cannot collect specific data because of either execution location or permissions, it will log a warning to the terminal.**
## Logging
All logs are output to `stderr`. `stdout` is used exclusively for outputs to print information.
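This separation means an on-terminal report can be redirected cleanly while log messages stay visible, for example:
```bash
clickhouse-diagnostics collect --output report > report.txt
```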
## Configuration file
In addition to supporting parameters via the command line, a configuration file can be specified via the `--config`, `-f` flag.
By default, we look for a configuration file `clickhouse-diagnostics.yml` in the same directory as the binary. If not present, we revert to command line flags.
**Values set via the command line values always take precedence over those in the configuration file.**
All parameters can be set via the configuration file and can in most cases be converted to a YAML hierarchy, where periods indicate nesting. For example,
`--collector.system_db.row_limit=1`
becomes
```yaml
collector:
  system_db:
    row_limit: 1
```
The following exceptions exist to avoid collisions:
| Command | Parameter | Configuration File |
|---------|------------|--------------------|
| collect | output | collect.output |
| collect | collectors | collect.collectors |
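Putting this together, a minimal `clickhouse-diagnostics.yml` might look as follows (the values are illustrative):
```yaml
collect:
  output: simple
  collectors: [system_db, system]
collector:
  system_db:
    row_limit: 1000
output:
  simple:
    skip_archive: true
```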
## FAQ
1. Does the collector need root permissions?
No. However, to read some local files e.g. configurations, the tool should be executed as the `clickhouse` user.
2. What ClickHouse database permissions does the collector need?
Read permissions on all system tables are required in most cases - although only specific collectors need this. [Access management permissions](https://clickhouse.com/docs/en/operations/settings/settings-users/#access_management-user-setting) will ensure full collection.
3. Is any processing done on logs for anonymization purposes?
Currently, no. ClickHouse should not log sensitive information, e.g. passwords, to its logs.
4. Is sensitive information removed from configuration files e.g. passwords?
Yes. We remove both passwords and hashed passwords. Please raise an issue if you require further information to be anonymized. We appreciate this is a sensitive topic.

View File

@@ -1,9 +0,0 @@
package main
import (
"github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd"
)
func main() {
cmd.Execute()
}

View File

@@ -1,159 +0,0 @@
package cmd
import (
"fmt"
"os"
"strings"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd/params"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/file"
_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/terminal"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"github.com/spf13/viper"
)
var id string
var output = params.StringOptionsVar{
Options: outputs.GetOutputNames(),
Value: "simple",
}
// access credentials
var host string
var port uint16
var username string
var password string
var collectorNames = params.StringSliceOptionsVar{
Options: collectors.GetCollectorNames(false),
Values: collectors.GetCollectorNames(true),
}
// holds the collector params passed by the cli
var collectorParams params.ParamMap
// holds the output params passed by the cli
var outputParams params.ParamMap
const collectHelpTemplate = `Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
Global Flags:
{{.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}
Additional help topics:
Use "{{.CommandPath}} [command] --help" for more information about a command.
Use "{{.Parent.Name}} help --collector [collector]" for more information about a specific collector.
Use "{{.Parent.Name}} help --output [output]" for more information about a specific output.
`
func init() {
collectCmd.Flags().StringVar(&id, "id", getHostName(), "Id of diagnostic bundle")
// access credentials
collectCmd.Flags().StringVar(&host, "host", "localhost", "ClickHouse host")
collectCmd.Flags().Uint16VarP(&port, "port", "p", 9000, "ClickHouse native port")
collectCmd.Flags().StringVarP(&username, "username", "u", "", "ClickHouse username")
collectCmd.Flags().StringVar(&password, "password", "", "ClickHouse password")
// collectors and outputs
collectCmd.Flags().VarP(&output, "output", "o", fmt.Sprintf("Output Format for the diagnostic Bundle, options: [%s]\n", strings.Join(output.Options, ",")))
collectCmd.Flags().VarP(&collectorNames, "collectors", "c", fmt.Sprintf("Collectors to use, options: [%s]\n", strings.Join(collectorNames.Options, ",")))
collectorConfigs, err := collectors.BuildConfigurationOptions()
if err != nil {
log.Fatal().Err(err).Msg("Unable to build collector configurations")
}
collectorParams = params.NewParamMap(collectorConfigs)
outputConfigs, err := outputs.BuildConfigurationOptions()
if err != nil {
log.Fatal().Err(err).Msg("Unable to build output configurations")
}
params.AddParamMapToCmd(collectorParams, collectCmd, "collector", true)
outputParams = params.NewParamMap(outputConfigs)
params.AddParamMapToCmd(outputParams, collectCmd, "output", true)
collectCmd.SetFlagErrorFunc(handleFlagErrors)
collectCmd.SetHelpTemplate(collectHelpTemplate)
rootCmd.AddCommand(collectCmd)
}
var collectCmd = &cobra.Command{
Use: "collect",
Short: "Collect a diagnostic bundle",
Long: `Collect a ClickHouse diagnostic bundle for a specified ClickHouse instance`,
PreRun: func(cmd *cobra.Command, args []string) {
bindFlagsToConfig(cmd)
},
Example: fmt.Sprintf(`%s collect --username default --collector=%s --output=simple`, rootCmd.Name(), strings.Join(collectorNames.Options[:2], ",")),
Run: func(cmd *cobra.Command, args []string) {
log.Info().Msgf("executing collect command with %v collectors and %s output", collectorNames.Values, output.Value)
outputConfig := params.ConvertParamsToConfig(outputParams)[output.Value]
runConfig := internal.NewRunConfiguration(id, host, port, username, password, output.Value, outputConfig, collectorNames.Values, params.ConvertParamsToConfig(collectorParams))
internal.Capture(runConfig)
os.Exit(0)
},
}
func getHostName() string {
name, err := os.Hostname()
if err != nil {
name = "clickhouse-diagnostics"
}
return name
}
// these flags are nested under the cmd name in the config file to prevent collisions
var flagsToNest = []string{"output", "collectors"}
// this saves us binding each command manually to viper
func bindFlagsToConfig(cmd *cobra.Command) {
cmd.Flags().VisitAll(func(f *pflag.Flag) {
err := viper.BindEnv(f.Name, fmt.Sprintf("%s_%s", envPrefix,
strings.ToUpper(strings.Replace(f.Name, ".", "_", -1))))
if err != nil {
log.Error().Msgf("Unable to bind %s to config", f.Name)
}
configFlagName := f.Name
if utils.Contains(flagsToNest, f.Name) {
configFlagName = fmt.Sprintf("%s.%s", cmd.Use, configFlagName)
}
err = viper.BindPFlag(configFlagName, f)
if err != nil {
log.Error().Msgf("Unable to bind %s to config", f.Name)
}
// here we prefer the config value when the param is not set on the cmd line
if !f.Changed && viper.IsSet(configFlagName) {
val := viper.Get(configFlagName)
log.Debug().Msgf("Setting parameter %s from configuration file", f.Name)
err = cmd.Flags().Set(f.Name, fmt.Sprintf("%v", val))
if err != nil {
log.Error().Msgf("Unable to read \"%s\" value from config", f.Name)
} else {
log.Debug().Msgf("Set parameter \"%s\" from configuration", f.Name)
}
}
})
}

View File

@@ -1 +0,0 @@
package cmd

View File

@@ -1,124 +0,0 @@
package cmd
import (
"fmt"
"os"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd/params"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
)
var cHelp = params.StringOptionsVar{
Options: collectors.GetCollectorNames(false),
Value: "",
}
var oHelp = params.StringOptionsVar{
Options: outputs.GetOutputNames(),
Value: "",
}
func init() {
helpCmd.Flags().VarP(&cHelp, "collector", "c", "Specify collector to get description of available flags")
helpCmd.Flags().VarP(&oHelp, "output", "o", "Specify output to get description of available flags")
helpCmd.SetUsageTemplate(`Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}
Available Commands:{{range .Parent.Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}
Alternatively use "{{.CommandPath}} [command] --help" for more information about a command.
`)
helpCmd.SetFlagErrorFunc(handleFlagErrors)
}
var helpCmd = &cobra.Command{
Use: "help [command]",
Short: "Help about any command, collector or output",
Long: `Help provides help for any command, collector or output in the application.`,
Example: fmt.Sprintf(`%[1]v help collect
%[1]v help --collector=config
%[1]v help --output=simple`, rootCmd.Name()),
Run: func(c *cobra.Command, args []string) {
if len(args) != 0 {
//find the command on which help is requested
cmd, _, e := c.Root().Find(args)
if cmd == nil || e != nil {
c.Printf("Unknown help topic %#q\n", args)
cobra.CheckErr(c.Root().Usage())
} else {
cmd.InitDefaultHelpFlag()
cobra.CheckErr(cmd.Help())
}
return
}
if cHelp.Value != "" && oHelp.Value != "" {
log.Error().Msg("Specify either --collector or --output, not both")
_ = c.Help()
os.Exit(1)
}
if cHelp.Value != "" {
collector, err := collectors.GetCollectorByName(cHelp.Value)
if err != nil {
log.Fatal().Err(err).Msgf("Unable to initialize collector %s", cHelp.Value)
}
configHelp(collector.Configuration(), "collector", cHelp.Value, collector.Description())
} else if oHelp.Value != "" {
output, err := outputs.GetOutputByName(oHelp.Value)
if err != nil {
log.Fatal().Err(err).Msgf("Unable to initialize output %s", oHelp.Value)
}
configHelp(output.Configuration(), "output", oHelp.Value, output.Description())
} else {
_ = c.Help()
}
os.Exit(0)
},
}
func configHelp(conf config.Configuration, componentType, name, description string) {
paramMap := params.NewParamMap(map[string]config.Configuration{
name: conf,
})
tempHelpCmd := &cobra.Command{
Use: fmt.Sprintf("--%s=%s", componentType, name),
Short: fmt.Sprintf("Help about the %s %s", name, componentType),
Long: description,
SilenceErrors: true,
Run: func(c *cobra.Command, args []string) {
_ = c.Help()
},
}
params.AddParamMapToCmd(paramMap, tempHelpCmd, componentType, false)
// this is workaround to hide the help flag
tempHelpCmd.Flags().BoolP("help", "h", false, "Dummy help")
tempHelpCmd.Flags().Lookup("help").Hidden = true
tempHelpCmd.SetUsageTemplate(`
{{.Long}}
Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}
Flags:{{if .HasAvailableLocalFlags}}
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{else}}
No configuration flags available
{{end}}
`)
_ = tempHelpCmd.Execute()
}

View File

@@ -1,281 +0,0 @@
package params
import (
"bytes"
"encoding/csv"
"fmt"
"strings"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/spf13/cobra"
)
type cliParamType uint8
const (
String cliParamType = iota
StringList
StringOptionsList
Integer
Boolean
)
type CliParam struct {
Description string
Default interface{}
//this should always be an address to a value - as required by cobra
Value interface{}
Type cliParamType
}
type ParamMap map[string]map[string]CliParam
func NewParamMap(configs map[string]config.Configuration) ParamMap {
paramMap := make(ParamMap)
for name, configuration := range configs {
for _, param := range configuration.Params {
switch p := param.(type) {
case config.StringParam:
paramMap = paramMap.createStringParam(name, p)
case config.StringListParam:
paramMap = paramMap.createStringListParam(name, p)
case config.StringOptions:
paramMap = paramMap.createStringOptionsParam(name, p)
case config.IntParam:
paramMap = paramMap.createIntegerParam(name, p)
case config.BoolParam:
paramMap = paramMap.createBoolParam(name, p)
}
}
}
return paramMap
}
func (m ParamMap) createBoolParam(rootKey string, bParam config.BoolParam) ParamMap {
if _, ok := m[rootKey]; !ok {
m[rootKey] = make(map[string]CliParam)
}
var value bool
param := CliParam{
Description: bParam.Description(),
Default: bParam.Value,
Value: &value,
Type: Boolean,
}
m[rootKey][bParam.Name()] = param
return m
}
func (m ParamMap) createStringParam(rootKey string, sParam config.StringParam) ParamMap {
if _, ok := m[rootKey]; !ok {
m[rootKey] = make(map[string]CliParam)
}
var value string
param := CliParam{
Description: sParam.Description(),
Default: sParam.Value,
Value: &value,
Type: String,
}
m[rootKey][sParam.Name()] = param
return m
}
func (m ParamMap) createStringListParam(rootKey string, lParam config.StringListParam) ParamMap {
if _, ok := m[rootKey]; !ok {
m[rootKey] = make(map[string]CliParam)
}
var value []string
param := CliParam{
Description: lParam.Description(),
Default: lParam.Values,
Value: &value,
Type: StringList,
}
m[rootKey][lParam.Name()] = param
return m
}
func (m ParamMap) createStringOptionsParam(rootKey string, oParam config.StringOptions) ParamMap {
if _, ok := m[rootKey]; !ok {
m[rootKey] = make(map[string]CliParam)
}
value := StringOptionsVar{
Options: oParam.Options,
Value: oParam.Value,
}
param := CliParam{
Description: oParam.Description(),
Default: oParam.Value,
Value: &value,
Type: StringOptionsList,
}
m[rootKey][oParam.Name()] = param
return m
}
func (m ParamMap) createIntegerParam(rootKey string, iParam config.IntParam) ParamMap {
if _, ok := m[rootKey]; !ok {
m[rootKey] = make(map[string]CliParam)
}
var value int64
param := CliParam{
Description: iParam.Description(),
Default: iParam.Value,
Value: &value,
Type: Integer,
}
m[rootKey][iParam.Name()] = param
return m
}
func (c CliParam) GetConfigParam(name string) config.ConfigParam {
// this is a config being passed to a collector - required can be false
param := config.NewParam(name, c.Description, false)
switch c.Type {
case String:
return config.StringParam{
Param: param,
// values will be pointers
Value: *(c.Value.(*string)),
}
case StringList:
return config.StringListParam{
Param: param,
Values: *(c.Value.(*[]string)),
}
case StringOptionsList:
optionsVar := *(c.Value.(*StringOptionsVar))
return config.StringOptions{
Param: param,
Options: optionsVar.Options,
Value: optionsVar.Value,
}
case Integer:
return config.IntParam{
Param: param,
Value: *(c.Value.(*int64)),
}
case Boolean:
return config.BoolParam{
Param: param,
Value: *(c.Value.(*bool)),
}
}
return param
}
type StringOptionsVar struct {
Options []string
Value string
}
func (o StringOptionsVar) String() string {
return o.Value
}
func (o *StringOptionsVar) Set(p string) error {
isIncluded := func(opts []string, val string) bool {
for _, opt := range opts {
if val == opt {
return true
}
}
return false
}
if !isIncluded(o.Options, p) {
return fmt.Errorf("%s is not included in options: %v", p, o.Options)
}
o.Value = p
return nil
}
func (o *StringOptionsVar) Type() string {
return "string"
}
type StringSliceOptionsVar struct {
Options []string
Values []string
}
func (o StringSliceOptionsVar) String() string {
str, _ := writeAsCSV(o.Values)
return "[" + str + "]"
}
func (o *StringSliceOptionsVar) Set(val string) error {
values, err := readAsCSV(val)
if err != nil {
return err
}
vValues := utils.Distinct(values, o.Options)
if len(vValues) > 0 {
return fmt.Errorf("%v are not included in options: %v", vValues, o.Options)
}
o.Values = values
return nil
}
func (o *StringSliceOptionsVar) Type() string {
return "stringSlice"
}
func writeAsCSV(vals []string) (string, error) {
b := &bytes.Buffer{}
w := csv.NewWriter(b)
err := w.Write(vals)
if err != nil {
return "", err
}
w.Flush()
return strings.TrimSuffix(b.String(), "\n"), nil
}
func readAsCSV(val string) ([]string, error) {
if val == "" {
return []string{}, nil
}
stringReader := strings.NewReader(val)
csvReader := csv.NewReader(stringReader)
return csvReader.Read()
}
func AddParamMapToCmd(paramMap ParamMap, cmd *cobra.Command, prefix string, hide bool) {
for rootKey, childMap := range paramMap {
for childKey, value := range childMap {
paramName := fmt.Sprintf("%s.%s.%s", prefix, rootKey, childKey)
switch value.Type {
case String:
cmd.Flags().StringVar(value.Value.(*string), paramName, value.Default.(string), value.Description)
case StringList:
cmd.Flags().StringSliceVar(value.Value.(*[]string), paramName, value.Default.([]string), value.Description)
case StringOptionsList:
cmd.Flags().Var(value.Value.(*StringOptionsVar), paramName, value.Description)
case Integer:
cmd.Flags().Int64Var(value.Value.(*int64), paramName, value.Default.(int64), value.Description)
case Boolean:
cmd.Flags().BoolVar(value.Value.(*bool), paramName, value.Default.(bool), value.Description)
}
// this ensures flags from collectors and outputs are not shown as they will pollute the output
if hide {
_ = cmd.Flags().MarkHidden(paramName)
}
}
}
}
func ConvertParamsToConfig(paramMap ParamMap) map[string]config.Configuration {
configuration := make(map[string]config.Configuration)
for rootKey, childMap := range paramMap {
if _, ok := configuration[rootKey]; !ok {
configuration[rootKey] = config.Configuration{}
}
for childKey, value := range childMap {
configParam := value.GetConfigParam(childKey)
configuration[rootKey] = config.Configuration{Params: append(configuration[rootKey].Params, configParam)}
}
}
return configuration
}

View File

@@ -1,247 +0,0 @@
package params_test
import (
"os"
"sort"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd/params"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/spf13/cobra"
"github.com/stretchr/testify/require"
)
var conf = map[string]config.Configuration{
"config": {
Params: []config.ConfigParam{
config.StringParam{
Value: "",
Param: config.NewParam("directory", "A directory", false),
AllowEmpty: true,
},
},
},
"system": {
Params: []config.ConfigParam{
config.StringListParam{
// nil means include everything
Values: nil,
Param: config.NewParam("include_tables", "Include tables", false),
},
config.StringListParam{
Values: []string{"distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper"},
Param: config.NewParam("exclude_tables", "Excluded tables", false),
},
config.IntParam{
Value: 100000,
Param: config.NewParam("row_limit", "Max rows", false),
},
},
},
"reader": {
Params: []config.ConfigParam{
config.StringOptions{
Value: "csv",
Options: []string{"csv"},
Param: config.NewParam("format", "Format of imported files", false),
},
config.BoolParam{
Value: true,
Param: config.NewParam("collect_archives", "Collect archives", false),
},
},
},
}
func TestNewParamMap(t *testing.T) {
// test each of the types via NewParamMap - one with each type. the keys here can represent anything e.g. a collector name
t.Run("test param map correctly converts types", func(t *testing.T) {
paramMap := params.NewParamMap(conf)
require.Len(t, paramMap, 3)
// check config
require.Contains(t, paramMap, "config")
require.Len(t, paramMap["config"], 1)
require.Contains(t, paramMap["config"], "directory")
require.IsType(t, params.CliParam{}, paramMap["config"]["directory"])
require.Equal(t, "A directory", paramMap["config"]["directory"].Description)
require.Equal(t, "", *(paramMap["config"]["directory"].Value.(*string)))
require.Equal(t, "", paramMap["config"]["directory"].Default)
require.Equal(t, params.String, paramMap["config"]["directory"].Type)
// check system
require.Contains(t, paramMap, "system")
require.Len(t, paramMap["system"], 3)
require.IsType(t, params.CliParam{}, paramMap["system"]["include_tables"])
require.Equal(t, "Include tables", paramMap["system"]["include_tables"].Description)
var value []string
require.Equal(t, &value, paramMap["system"]["include_tables"].Value)
require.Equal(t, value, paramMap["system"]["include_tables"].Default)
require.Equal(t, params.StringList, paramMap["system"]["include_tables"].Type)
require.Equal(t, "Excluded tables", paramMap["system"]["exclude_tables"].Description)
require.IsType(t, params.CliParam{}, paramMap["system"]["exclude_tables"])
require.Equal(t, &value, paramMap["system"]["exclude_tables"].Value)
require.Equal(t, []string{"distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper"}, paramMap["system"]["exclude_tables"].Default)
require.Equal(t, params.StringList, paramMap["system"]["exclude_tables"].Type)
require.Equal(t, "Max rows", paramMap["system"]["row_limit"].Description)
require.IsType(t, params.CliParam{}, paramMap["system"]["row_limit"])
var iValue int64
require.Equal(t, &iValue, paramMap["system"]["row_limit"].Value)
require.Equal(t, int64(100000), paramMap["system"]["row_limit"].Default)
require.Equal(t, params.Integer, paramMap["system"]["row_limit"].Type)
// check reader
require.Contains(t, paramMap, "reader")
require.Len(t, paramMap["reader"], 2)
require.IsType(t, params.CliParam{}, paramMap["reader"]["format"])
require.Equal(t, "Format of imported files", paramMap["reader"]["format"].Description)
require.IsType(t, params.CliParam{}, paramMap["reader"]["format"])
oValue := params.StringOptionsVar{
Options: []string{"csv"},
Value: "csv",
}
require.Equal(t, &oValue, paramMap["reader"]["format"].Value)
require.Equal(t, "csv", paramMap["reader"]["format"].Default)
require.Equal(t, params.StringOptionsList, paramMap["reader"]["format"].Type)
require.IsType(t, params.CliParam{}, paramMap["reader"]["collect_archives"])
require.Equal(t, "Collect archives", paramMap["reader"]["collect_archives"].Description)
require.IsType(t, params.CliParam{}, paramMap["reader"]["collect_archives"])
var bVar bool
require.Equal(t, &bVar, paramMap["reader"]["collect_archives"].Value)
require.Equal(t, true, paramMap["reader"]["collect_archives"].Default)
require.Equal(t, params.Boolean, paramMap["reader"]["collect_archives"].Type)
})
}
// test GetConfigParam
func TestConvertParamsToConfig(t *testing.T) {
paramMap := params.NewParamMap(conf)
t.Run("test we can convert a param map back to a config", func(t *testing.T) {
cParam := params.ConvertParamsToConfig(paramMap)
// these will not be equal as we have some information loss e.g. allowEmpty
//require.Equal(t, conf, cParam)
// deep equality
for name := range conf {
require.Equal(t, len(conf[name].Params), len(cParam[name].Params))
// sort both consistently
sort.Slice(conf[name].Params, func(i, j int) bool {
return conf[name].Params[i].Name() < conf[name].Params[j].Name()
})
sort.Slice(cParam[name].Params, func(i, j int) bool {
return cParam[name].Params[i].Name() < cParam[name].Params[j].Name()
})
for i, param := range conf[name].Params {
require.Equal(t, param.Required(), cParam[name].Params[i].Required())
require.Equal(t, param.Name(), cParam[name].Params[i].Name())
require.Equal(t, param.Description(), cParam[name].Params[i].Description())
}
}
})
}
// create via NewParamMap and add to command AddParamMapToCmd - check contents
func TestAddParamMapToCmd(t *testing.T) {
paramMap := params.NewParamMap(conf)
t.Run("test we can add hidden params to a command", func(t *testing.T) {
testComand := &cobra.Command{
Use: "test",
Short: "Run a test",
Long: `Longer description`,
Run: func(cmd *cobra.Command, args []string) {
os.Exit(0)
},
}
params.AddParamMapToCmd(paramMap, testComand, "collector", true)
// check we get an error on one which doesn't exist
_, err := testComand.Flags().GetString("collector.config.random")
require.NotNil(t, err)
// check getting incorrect type
_, err = testComand.Flags().GetString("collector.system.include_tables")
require.NotNil(t, err)
// check existence of all flags
directory, err := testComand.Flags().GetString("collector.config.directory")
require.Nil(t, err)
require.Equal(t, "", directory)
includeTables, err := testComand.Flags().GetStringSlice("collector.system.include_tables")
require.Nil(t, err)
require.Equal(t, []string{}, includeTables)
excludeTables, err := testComand.Flags().GetStringSlice("collector.system.exclude_tables")
require.Nil(t, err)
require.Equal(t, []string{"distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper"}, excludeTables)
rowLimit, err := testComand.Flags().GetInt64("collector.system.row_limit")
require.Nil(t, err)
require.Equal(t, int64(100000), rowLimit)
format, err := testComand.Flags().GetString("collector.reader.format")
require.Nil(t, err)
require.Equal(t, "csv", format)
collectArchives, err := testComand.Flags().GetBool("collector.reader.collect_archives")
require.Nil(t, err)
require.Equal(t, true, collectArchives)
})
}
// test StringOptionsVar
func TestStringOptionsVar(t *testing.T) {
t.Run("test we can set", func(t *testing.T) {
format := params.StringOptionsVar{
Options: []string{"csv", "tsv", "native"},
Value: "csv",
}
require.Equal(t, "csv", format.String())
err := format.Set("tsv")
require.Nil(t, err)
require.Equal(t, "tsv", format.String())
})
t.Run("test set invalid", func(t *testing.T) {
format := params.StringOptionsVar{
Options: []string{"csv", "tsv", "native"},
Value: "csv",
}
require.Equal(t, "csv", format.String())
err := format.Set("random")
require.NotNil(t, err)
require.Equal(t, "random is not included in options: [csv tsv native]", err.Error())
})
}
// test StringSliceOptionsVar
func TestStringSliceOptionsVar(t *testing.T) {
t.Run("test we can set", func(t *testing.T) {
formats := params.StringSliceOptionsVar{
Options: []string{"csv", "tsv", "native", "qsv"},
Values: []string{"csv", "tsv"},
}
require.Equal(t, "[csv,tsv]", formats.String())
err := formats.Set("tsv,native")
require.Nil(t, err)
require.Equal(t, "[tsv,native]", formats.String())
})
t.Run("test set invalid", func(t *testing.T) {
formats := params.StringSliceOptionsVar{
Options: []string{"csv", "tsv", "native", "qsv"},
Values: []string{"csv", "tsv"},
}
require.Equal(t, "[csv,tsv]", formats.String())
err := formats.Set("tsv,random")
require.NotNil(t, err)
require.Equal(t, "[random] are not included in options: [csv tsv native qsv]", err.Error())
err = formats.Set("msv,random")
require.NotNil(t, err)
require.Equal(t, "[msv random] are not included in options: [csv tsv native qsv]", err.Error())
})
}

View File

@@ -1,174 +0,0 @@
package cmd
import (
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"strings"
"time"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"github.com/spf13/cobra"
"github.com/spf13/viper"
)
func enableDebug() {
if debug {
zerolog.SetGlobalLevel(zerolog.DebugLevel)
go func() {
err := http.ListenAndServe("localhost:8080", nil)
if err != nil {
log.Error().Err(err).Msg("unable to start debugger")
} else {
log.Debug().Msg("debugger has been started on port 8080")
}
}()
}
}
var rootCmd = &cobra.Command{
Use: "clickhouse-diagnostics",
Short: "Capture and convert ClickHouse diagnostic bundles.",
Long: `Captures ClickHouse diagnostic bundles to a number of supported formats, including file and ClickHouse itself. Converts bundles between formats.`,
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
enableDebug()
err := initializeConfig()
if err != nil {
log.Error().Err(err)
os.Exit(1)
}
return nil
},
Example: `clickhouse-diagnostics collect`,
}
const (
colorRed = iota + 31
colorGreen
colorYellow
colorMagenta = 35
colorBold = 1
)
const TimeFormat = time.RFC3339
var debug bool
var configFiles []string
const (
// The environment variable prefix of all environment variables bound to our command line flags.
// For example, --output is bound to CLICKHOUSE_DIAGNOSTIC_OUTPUT.
envPrefix = "CLICKHOUSE_DIAGNOSTIC"
)
func init() {
rootCmd.PersistentFlags().BoolVarP(&debug, "debug", "d", false, "Enable debug mode")
rootCmd.PersistentFlags().StringSliceVarP(&configFiles, "config", "f", []string{"clickhouse-diagnostics.yml", "/etc/clickhouse-diagnostics.yml"}, "Configuration file path")
// set a usage template to ensure flags on root are listed as global
rootCmd.SetUsageTemplate(`Usage:{{if .Runnable}}
{{.UseLine}}{{end}}{{if .HasAvailableSubCommands}}
{{.CommandPath}} [command]{{end}}{{if gt (len .Aliases) 0}}
Aliases:
{{.NameAndAliases}}{{end}}{{if .HasExample}}
Examples:
{{.Example}}{{end}}{{if .HasAvailableSubCommands}}
Available Commands:{{range .Commands}}{{if (or .IsAvailableCommand (eq .Name "help"))}}
{{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableLocalFlags}}
Global Flags:
{{.LocalFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .HasAvailableInheritedFlags}}
Additional help topics:{{range .Commands}}{{if .IsAdditionalHelpTopicCommand}}
{{rpad .CommandPath .CommandPathPadding}} {{.Short}}{{end}}{{end}}{{end}}{{if .HasAvailableSubCommands}}
Use "{{.CommandPath}} [command] --help" for more information about a command.{{end}}
`)
rootCmd.SetFlagErrorFunc(handleFlagErrors)
}
func Execute() {
// logs go to stderr - stdout is exclusive for outputs e.g. tables
output := zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: TimeFormat}
// override the colors
output.FormatLevel = func(i interface{}) string {
var l string
if ll, ok := i.(string); ok {
switch ll {
case zerolog.LevelTraceValue:
l = colorize("TRC", colorMagenta)
case zerolog.LevelDebugValue:
l = colorize("DBG", colorMagenta)
case zerolog.LevelInfoValue:
l = colorize("INF", colorGreen)
case zerolog.LevelWarnValue:
l = colorize(colorize("WRN", colorYellow), colorBold)
case zerolog.LevelErrorValue:
l = colorize(colorize("ERR", colorRed), colorBold)
case zerolog.LevelFatalValue:
l = colorize(colorize("FTL", colorRed), colorBold)
case zerolog.LevelPanicValue:
l = colorize(colorize("PNC", colorRed), colorBold)
default:
l = colorize("???", colorBold)
}
} else {
if i == nil {
l = colorize("???", colorBold)
} else {
l = strings.ToUpper(fmt.Sprintf("%s", i))[0:3]
}
}
return l
}
output.FormatTimestamp = func(i interface{}) string {
tt := i.(string)
return colorize(tt, colorGreen)
}
log.Logger = log.Output(output)
zerolog.SetGlobalLevel(zerolog.InfoLevel)
rootCmd.SetHelpCommand(helpCmd)
if err := rootCmd.Execute(); err != nil {
log.Fatal().Err(err)
}
}
// colorize returns the string s wrapped in ANSI code c
func colorize(s interface{}, c int) string {
return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s)
}
func handleFlagErrors(cmd *cobra.Command, err error) error {
fmt.Println(colorize(colorize(fmt.Sprintf("Error: %s\n", err), colorRed), colorBold))
_ = cmd.Help()
os.Exit(1)
return nil
}
func initializeConfig() error {
// we use the first config file we find
var configFile string
for _, confFile := range configFiles {
if ok, _ := utils.FileExists(confFile); ok {
configFile = confFile
break
}
}
if configFile == "" {
log.Warn().Msgf("config file in %s not found - config file will be ignored", configFiles)
return nil
}
viper.SetConfigFile(configFile)
if err := viper.ReadInConfig(); err != nil {
return errors.Wrapf(err, "Unable to read configuration file at %s", configFile)
}
return nil
}

View File

@@ -1,24 +0,0 @@
package cmd
import (
"fmt"
"github.com/spf13/cobra"
)
var (
Version = "" // set at compile time with -ldflags "-X github.com/ClickHouse/ClickHouse/programs/diagnostics/cmd.Version=x.y.z"
Commit = ""
)
func init() {
rootCmd.AddCommand(versionCmd)
}
var versionCmd = &cobra.Command{
Use: "version",
Short: "Print the version number of clickhouse-diagnostics",
Long: `All software has versions. This is clickhouse-diagnostics's`,
Run: func(cmd *cobra.Command, args []string) {
fmt.Printf("ClickHouse Diagnostics %s (%s)\n", Version, Commit)
},
}

View File

@@ -1,89 +0,0 @@
module github.com/ClickHouse/ClickHouse/programs/diagnostics
go 1.19
require (
github.com/ClickHouse/clickhouse-go/v2 v2.0.12
github.com/DATA-DOG/go-sqlmock v1.5.0
github.com/Masterminds/semver v1.5.0
github.com/bmatcuk/doublestar/v4 v4.0.2
github.com/docker/go-connections v0.4.0
github.com/elastic/gosigar v0.14.2
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
github.com/jaypipes/ghw v0.8.0
github.com/matishsiao/goInfo v0.0.0-20210923090445-da2e3fa8d45f
github.com/mholt/archiver/v4 v4.0.0-alpha.4
github.com/olekukonko/tablewriter v0.0.5
github.com/pkg/errors v0.9.1
github.com/rs/zerolog v1.26.1
github.com/spf13/cobra v1.3.0
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.10.1
github.com/stretchr/testify v1.8.1
github.com/testcontainers/testcontainers-go v0.18.0
github.com/yargevad/filepathx v1.0.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
github.com/Microsoft/go-winio v0.5.2 // indirect
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
github.com/andybalholm/brotli v1.0.4 // indirect
github.com/cenkalti/backoff/v4 v4.2.0 // indirect
github.com/containerd/containerd v1.6.17 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/distribution/distribution v2.8.2+incompatible // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/docker v23.0.0+incompatible // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/dsnet/compress v0.0.1 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/ghodss/yaml v1.0.0 // indirect
github.com/go-ole/go-ole v1.2.4 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/jaypipes/pcidb v0.6.0 // indirect
github.com/klauspost/compress v1.13.6 // indirect
github.com/klauspost/pgzip v1.2.5 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-runewidth v0.0.9 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/mitchellh/mapstructure v1.4.3 // indirect
github.com/moby/patternmatcher v0.5.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/opencontainers/runc v1.1.3 // indirect
github.com/paulmach/orb v0.4.0 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pierrec/lz4/v4 v4.1.14 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/spf13/afero v1.8.0 // indirect
github.com/spf13/cast v1.4.1 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/therootcompany/xz v1.0.1 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
go.opentelemetry.io/otel v1.4.1 // indirect
go.opentelemetry.io/otel/trace v1.4.1 // indirect
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/ini.v1 v1.66.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
)

View File

@@ -1,992 +0,0 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.98.0/go.mod h1:ua6Ush4NALrHk5QXDWnjvZHN93OuF0HfuEPq9I1X0cM=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/firestore v1.6.1/go.mod h1:asNXNOzBdyVQmEU+ggO8UPodTkEVFW5Qx+rwHnAz+EY=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.5.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/ClickHouse/clickhouse-go/v2 v2.0.12 h1:Nbl/NZwoM6LGJm7smNBgvtdr/rxjlIssSW3eG/Nmb9E=
github.com/ClickHouse/clickhouse-go/v2 v2.0.12/go.mod h1:u4RoNQLLM2W6hNSPYrIESLJqaWSInZVmfM+MlaAhXcg=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/hcsshim v0.9.6 h1:VwnDOgLeoi2du6dAznfmspNqTiwczvjv4K7NxuY9jsY=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY=
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/bmatcuk/doublestar/v4 v4.0.2 h1:X0krlUVAVmtr2cRoTqR8aDMrDqnB36ht8wpWTiQ3jsA=
github.com/bmatcuk/doublestar/v4 v4.0.2/go.mod h1:xBQ8jztBU6kakFMg+8WGxn0c6z1fTSPVIjEY1Wr7jzc=
github.com/cenkalti/backoff/v4 v4.2.0 h1:HN5dHm3WBOgndBH6E8V0q2jIYIR3s9yglV8k/+MN3u4=
github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/containerd/containerd v1.6.17 h1:XDnJIeJW0cLf6v7/+N+6L9kGrChHeXekZp2VHu6OpiY=
github.com/containerd/containerd v1.6.17/go.mod h1:1RdCUu95+gc2v9t3IL+zIlpClSmew7/0YS8O5eQZrOw=
github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/distribution/distribution v2.8.2+incompatible h1:k9+4DKdOG+quPFZXT/mUsiQrGu9vYCp+dXpuPkuqhk8=
github.com/distribution/distribution v2.8.2+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v23.0.0+incompatible h1:L6c28tNyqZ4/ub9AZC9d5QUuunoHHfEH4/Ue+h/E5nE=
github.com/docker/docker v23.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dsnet/compress v0.0.1 h1:PlZu0n3Tuv04TzpfPbrnI0HW/YwodEXDS+oPKahKF0Q=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY=
github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g=
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jaypipes/ghw v0.8.0 h1:02q1pTm9CD83vuhBsEZZhOCS128pq87uyaQeJZkp3sQ=
github.com/jaypipes/ghw v0.8.0/go.mod h1:+gR9bjm3W/HnFi90liF+Fj9GpCe/Dsibl9Im8KmC7c4=
github.com/jaypipes/pcidb v0.6.0 h1:VIM7GKVaW4qba30cvB67xSCgJPTzkG8Kzw/cbs5PHWU=
github.com/jaypipes/pcidb v0.6.0/go.mod h1:L2RGk04sfRhp5wvHO0gfRAMoLY/F3PKv/nwJeVoho0o=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE=
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/matishsiao/goInfo v0.0.0-20210923090445-da2e3fa8d45f h1:B0OD7nYl2FPQEVrw8g2uyc1lGEzNbvrKh7fspGZcbvY=
github.com/matishsiao/goInfo v0.0.0-20210923090445-da2e3fa8d45f/go.mod h1:aEt7p9Rvh67BYApmZwNDPpgircTO2kgdmDUoF/1QmwA=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mholt/archiver/v4 v4.0.0-alpha.4 h1:QJ4UuWgavPynEX3LXxClHDRGzYcgcvTtAMp8az7spuw=
github.com/mholt/archiver/v4 v4.0.0-alpha.4/go.mod h1:J7SYS/UTAtnO3I49RQEf+2FYZVwo7XBOh9Im43VrjNs=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM=
github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
github.com/moby/patternmatcher v0.5.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f h1:J/7hjLaHLD7epG0m6TBMGmp4NQ+ibBYLfeyJWdAIFLA=
github.com/moby/term v0.0.0-20221128092401-c43b287e0e0f/go.mod h1:15ce4BGCFxt7I5NQKT+HV0yEDxmf6fSysfEDiVo3zFM=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0-rc2 h1:2zx/Stx4Wc5pIPDvIxHXvXtQFW/7XWJGmnM7r3wg034=
github.com/opencontainers/image-spec v1.1.0-rc2/go.mod h1:3OVijpioIKYWTqjiG0zfF6wvoJ4fAXGbjdZuI2NgsRQ=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/paulmach/orb v0.4.0 h1:ilp1MQjRapLJ1+qcays1nZpe0mvkCY+b8JU/qBKRZ1A=
github.com/paulmach/orb v0.4.0/go.mod h1:FkcWtplUAIVqAuhAOV2d3rpbnQyliDOjOcLW9dUrfdU=
github.com/paulmach/protoscan v0.2.1-0.20210522164731-4e53c6875432/go.mod h1:2sV+uZ/oQh66m4XJVZm5iqUZ62BN88Ex1E+TTS0nLzI=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pierrec/lz4/v4 v4.1.14 h1:+fL8AQEZtz/ijeNnpduH0bROTu0O3NZAlPjQxGn8LwE=
github.com/pierrec/lz4/v4 v4.1.14/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.0 h1:trlNQbNUG3OdDrDil03MCb1H2o9nJ1x4/5LYw7byDE0=
github.com/sirupsen/logrus v1.9.0/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.8.0 h1:5MmtuhAgYeU6qpa7w7bP0dv6MBYuup0vekhSpSkoq60=
github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/testcontainers/testcontainers-go v0.18.0 h1:8RXrcIQv5xX/uBOSmZd297gzvA7F0yuRA37/918o7Yg=
github.com/testcontainers/testcontainers-go v0.18.0/go.mod h1:rLC7hR2SWRjJZZNrUYiTKvUXCziNxzZiYtz9icTWYNQ=
github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
go.etcd.io/etcd/client/v2 v2.305.1/go.mod h1:pMEacxZW7o8pg4CrFE7pquyCJJzZvkvdD2RibOCCCGs=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/otel v1.4.1 h1:QbINgGDDcoQUoMJa2mMaWno49lja9sHwp6aoa2n3a4g=
go.opentelemetry.io/otel v1.4.1/go.mod h1:StM6F/0fSwpd8dKWDCdRr7uRvEPYdW0hBSlbdTiUde4=
go.opentelemetry.io/otel/trace v1.4.1 h1:O+16qcdTrT7zxv2J6GejTPFinSwA++cYerC5iSiF8EQ=
go.opentelemetry.io/otel/trace v1.4.1/go.mod h1:iYEVbroFCNut9QkwEczV9vMRPHNKSSwYZjulEtsmhFc=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191220220014-0732a990476f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211028162531-8db9c33dc351/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad h1:kqrS+lhvaMHCxul6sKQvKJ8nAAhlVItmZV822hYFH/U=
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/ini.v1 v1.66.2 h1:XfR1dOYubytKy4Shzc2LHrrGhU0lDCfDGG1yLPmpgsI=
gopkg.in/ini.v1 v1.66.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=

View File

@ -1,113 +0,0 @@
package clickhouse
import (
"fmt"
"path/filepath"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/pkg/errors"
)
type ConfigCollector struct {
resourceManager *platform.ResourceManager
}
func NewConfigCollector(m *platform.ResourceManager) *ConfigCollector {
return &ConfigCollector{
resourceManager: m,
}
}
const DefaultConfigLocation = "/etc/clickhouse-server/"
const ProcessedConfigurationLocation = "/var/lib/clickhouse/preprocessed_configs"
func (c ConfigCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
conf, err := conf.ValidateConfig(c.Configuration())
if err != nil {
return &data.DiagnosticBundle{}, err
}
directory, err := config.ReadStringValue(conf, "directory")
if err != nil {
return &data.DiagnosticBundle{}, err
}
if directory != "" {
// user has specified a directory - we therefore skip all other efforts to locate the config
frame, errs := data.NewConfigFileFrame(directory)
return &data.DiagnosticBundle{
Frames: map[string]data.Frame{
"user_specified": frame,
},
Errors: data.FrameErrors{Errors: errs},
}, nil
}
configCandidates, err := FindConfigurationFiles()
if err != nil {
return &data.DiagnosticBundle{}, errors.Wrapf(err, "Unable to find configuration files")
}
frames := make(map[string]data.Frame)
var frameErrors []error
for frameName, confDir := range configCandidates {
frame, errs := data.NewConfigFileFrame(confDir)
frameErrors = append(frameErrors, errs...)
frames[frameName] = frame
}
return &data.DiagnosticBundle{
Frames: frames,
Errors: data.FrameErrors{Errors: frameErrors},
}, err
}
func FindConfigurationFiles() (map[string]string, error) {
configCandidates := map[string]string{
"default": DefaultConfigLocation,
"preprocessed": ProcessedConfigurationLocation,
}
	// we don't know specifically where the config is, but try to find it via running clickhouse processes
processConfigs, err := utils.FindConfigsFromClickHouseProcesses()
if err != nil {
return nil, err
}
for i, path := range processConfigs {
confDir := filepath.Dir(path)
if len(processConfigs) == 1 {
configCandidates["process"] = confDir
break
}
configCandidates[fmt.Sprintf("process_%d", i)] = confDir
}
return configCandidates, nil
}
func (c ConfigCollector) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "",
Param: config.NewParam("directory", "Specify the location of the configuration files for ClickHouse Server e.g. /etc/clickhouse-server/", false),
AllowEmpty: true,
},
},
}
}
func (c ConfigCollector) Description() string {
return "Collects the ClickHouse configuration from the local filesystem."
}
func (c ConfigCollector) IsDefault() bool {
return true
}
// here we register the collector for use
func init() {
collectors.Register("config", func() (collectors.Collector, error) {
return &ConfigCollector{
resourceManager: platform.GetResourceManager(),
}, nil
})
}
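
For orientation, here is a minimal sketch of driving this collector directly, mirroring the deleted test code further below. The empty ResourceManager literal suffices because, as the code above shows, the config collector only reads the filesystem; running this against a real installation is an assumption.

```go
package main

import (
	"fmt"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
)

func main() {
	// construct the collector directly, as the tests do
	c := clickhouse.NewConfigCollector(&platform.ResourceManager{})
	// point it at an explicit directory so the default/process lookups are skipped
	bundle, err := c.Collect(config.Configuration{
		Params: []config.ConfigParam{
			config.StringParam{
				Value: "/etc/clickhouse-server/",
				Param: config.NewParam("directory", "Config location", false),
			},
		},
	})
	if err != nil {
		panic(err)
	}
	// with a directory specified, a single "user_specified" frame is returned
	fmt.Println(len(bundle.Frames))
}
```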

View File

@ -1,128 +0,0 @@
package clickhouse_test
import (
"encoding/xml"
"fmt"
"io"
"os"
"path"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/stretchr/testify/require"
)
func TestConfigConfiguration(t *testing.T) {
t.Run("correct configuration is returned for config collector", func(t *testing.T) {
configCollector := clickhouse.NewConfigCollector(&platform.ResourceManager{})
conf := configCollector.Configuration()
require.Len(t, conf.Params, 1)
// check first param
require.IsType(t, config.StringParam{}, conf.Params[0])
directory, ok := conf.Params[0].(config.StringParam)
require.True(t, ok)
require.False(t, directory.Required())
require.Equal(t, directory.Name(), "directory")
require.Equal(t, "", directory.Value)
})
}
func TestConfigCollect(t *testing.T) {
configCollector := clickhouse.NewConfigCollector(&platform.ResourceManager{})
t.Run("test default file collector configuration", func(t *testing.T) {
diagSet, err := configCollector.Collect(config.Configuration{})
require.Nil(t, err)
require.NotNil(t, diagSet)
// we won't be able to collect the default configs (default and preprocessed) - even if clickhouse is installed,
// these directories should not be readable under any permissions these tests are realistically executed with!
// note: we may also pick up configs from a local clickhouse process - we thus allow a len >= 2 but don't check this
// as it's non-deterministic
require.GreaterOrEqual(t, len(diagSet.Frames), 2)
// check default key
require.Contains(t, diagSet.Frames, "default")
require.Equal(t, diagSet.Frames["default"].Name(), "/etc/clickhouse-server/")
require.Equal(t, diagSet.Frames["default"].Columns(), []string{"config"})
// collection will have failed
checkFrame(t, diagSet.Frames["default"], nil)
// check preprocessed key
require.Contains(t, diagSet.Frames, "preprocessed")
require.Equal(t, diagSet.Frames["preprocessed"].Name(), "/var/lib/clickhouse/preprocessed_configs")
require.Equal(t, diagSet.Frames["preprocessed"].Columns(), []string{"config"})
// min of 2 - might be more if a local installation of clickhouse is running
require.GreaterOrEqual(t, len(diagSet.Errors.Errors), 2)
})
t.Run("test configuration when specified", func(t *testing.T) {
// create some test files
tempDir := t.TempDir()
confDir := path.Join(tempDir, "conf")
// create an includes file
includesDir := path.Join(tempDir, "includes")
err := os.MkdirAll(includesDir, os.ModePerm)
require.Nil(t, err)
includesPath := path.Join(includesDir, "random.xml")
includesFile, err := os.Create(includesPath)
require.Nil(t, err)
xmlWriter := io.Writer(includesFile)
enc := xml.NewEncoder(xmlWriter)
enc.Indent(" ", " ")
xmlConfig := data.XmlConfig{
XMLName: xml.Name{},
Clickhouse: data.XmlLoggerConfig{
XMLName: xml.Name{},
ErrorLog: "/var/log/clickhouse-server/clickhouse-server.err.log",
Log: "/var/log/clickhouse-server/clickhouse-server.log",
},
IncludeFrom: "",
}
err = enc.Encode(xmlConfig)
require.Nil(t, err)
// create 5 temporary config files - the rows slice has length 6 to account for the included file
rows := make([][]interface{}, 6)
for i := 0; i < 5; i++ {
if i == 4 {
// set the includes for the last doc
xmlConfig.IncludeFrom = includesPath
}
// we want to check that hierarchies are walked, so create a simple folder for each file
fileDir := path.Join(confDir, fmt.Sprintf("%d", i))
err := os.MkdirAll(fileDir, os.ModePerm)
require.Nil(t, err)
filepath := path.Join(fileDir, fmt.Sprintf("random-%d.xml", i))
row := make([]interface{}, 1)
row[0] = data.XmlConfigFile{Path: filepath}
rows[i] = row
xmlFile, err := os.Create(filepath)
require.Nil(t, err)
// write a little XML so it's valid
xmlConfig := xmlConfig
xmlWriter := io.Writer(xmlFile)
enc := xml.NewEncoder(xmlWriter)
enc.Indent(" ", " ")
err = enc.Encode(xmlConfig)
require.Nil(t, err)
}
diagSet, err := configCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: confDir,
Param: config.NewParam("directory", "File locations", false),
},
},
})
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Frames, 1)
require.Contains(t, diagSet.Frames, "user_specified")
require.Equal(t, diagSet.Frames["user_specified"].Name(), confDir)
require.Equal(t, diagSet.Frames["user_specified"].Columns(), []string{"config"})
iConf := make([]interface{}, 1)
iConf[0] = data.XmlConfigFile{Path: includesPath, Included: true}
rows[5] = iConf
checkFrame(t, diagSet.Frames["user_specified"], rows)
})
}

View File

@ -1,108 +0,0 @@
package clickhouse
import (
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/pkg/errors"
)
type DBLogTable struct {
orderBy data.OrderBy
excludeColumns []string
}
var DbLogTables = map[string]DBLogTable{
"query_log": {
orderBy: data.OrderBy{
Column: "event_time_microseconds",
Order: data.Asc,
},
excludeColumns: []string{},
},
"query_thread_log": {
orderBy: data.OrderBy{
Column: "event_time_microseconds",
Order: data.Asc,
},
excludeColumns: []string{},
},
"text_log": {
orderBy: data.OrderBy{
Column: "event_time_microseconds",
Order: data.Asc,
},
excludeColumns: []string{},
},
}
// DBLogsCollector collects the ClickHouse logs stored in system tables (see DbLogTables above)
type DBLogsCollector struct {
resourceManager *platform.ResourceManager
}
func NewDBLogsCollector(m *platform.ResourceManager) *DBLogsCollector {
return &DBLogsCollector{
resourceManager: m,
}
}
func (dc *DBLogsCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
conf, err := conf.ValidateConfig(dc.Configuration())
if err != nil {
return &data.DiagnosticBundle{}, err
}
rowLimit, err := config.ReadIntValue(conf, "row_limit")
if err != nil {
return &data.DiagnosticBundle{}, err
}
frames := make(map[string]data.Frame)
var frameErrors []error
for logTable, tableConfig := range DbLogTables {
frame, err := dc.resourceManager.DbClient.ReadTable("system", logTable, tableConfig.excludeColumns, tableConfig.orderBy, rowLimit)
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to collect %s", logTable))
} else {
frames[logTable] = frame
}
}
fErrors := data.FrameErrors{
Errors: frameErrors,
}
return &data.DiagnosticBundle{
Frames: frames,
Errors: fErrors,
}, nil
}
func (dc *DBLogsCollector) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.IntParam{
Value: 100000,
Param: config.NewParam("row_limit", "Maximum number of log rows to collect. Negative values mean unlimited", false),
},
},
}
}
func (dc *DBLogsCollector) IsDefault() bool {
return true
}
func (dc DBLogsCollector) Description() string {
return "Collects the ClickHouse logs directly from the database."
}
// here we register the collector for use
func init() {
collectors.Register("db_logs", func() (collectors.Collector, error) {
return &DBLogsCollector{
resourceManager: platform.GetResourceManager(),
}, nil
})
}
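
Similarly, a minimal sketch of overriding the default row_limit when invoking this collector, mirroring the deleted test code below. platform.GetResourceManager() is used as in the init() registration above; a reachable ClickHouse server behind its DbClient is assumed.

```go
package main

import (
	"fmt"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
)

func main() {
	// assumes GetResourceManager returns a manager whose DbClient can reach a server
	dc := clickhouse.NewDBLogsCollector(platform.GetResourceManager())
	// cap collection at 1000 rows per log table instead of the default 100000
	bundle, err := dc.Collect(config.Configuration{
		Params: []config.ConfigParam{
			config.IntParam{
				Value: 1000,
				Param: config.NewParam("row_limit", "Maximum number of log rows to collect. Negative values mean unlimited", false),
			},
		},
	})
	if err != nil {
		panic(err)
	}
	// tables that could not be read are reported in bundle.Errors rather than failing the call
	for name := range bundle.Frames {
		fmt.Println("collected:", name) // e.g. query_log, text_log
	}
}
```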

View File

@ -1,119 +0,0 @@
package clickhouse_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/stretchr/testify/require"
)
func TestDbLogsConfiguration(t *testing.T) {
t.Run("correct configuration is returned for summary collector", func(t *testing.T) {
client := test.NewFakeClickhouseClient(make(map[string][]string))
dbLogsCollector := clickhouse.NewDBLogsCollector(&platform.ResourceManager{
DbClient: client,
})
conf := dbLogsCollector.Configuration()
require.Len(t, conf.Params, 1)
require.IsType(t, config.IntParam{}, conf.Params[0])
rowLimit, ok := conf.Params[0].(config.IntParam)
require.True(t, ok)
require.False(t, rowLimit.Required())
require.Equal(t, rowLimit.Name(), "row_limit")
require.Equal(t, int64(100000), rowLimit.Value)
})
}
func TestDbLogsCollect(t *testing.T) {
client := test.NewFakeClickhouseClient(make(map[string][]string))
dbLogsCollector := clickhouse.NewDBLogsCollector(&platform.ResourceManager{
DbClient: client,
})
queryLogColumns := []string{"type", "event_date", "event_time", "event_time_microseconds",
"query_start_time", "query_start_time_microseconds", "query_duration_ms", "read_rows", "read_bytes", "written_rows", "written_bytes",
"result_rows", "result_bytes", "memory_usage", "current_database", "query", "formatted_query", "normalized_query_hash",
"query_kind", "databases", "tables", "columns", "projections", "views", "exception_code", "exception", "stack_trace",
"is_initial_query", "user", "query_id", "address", "port", "initial_user", "initial_query_id", "initial_address", "initial_port",
"initial_query_start_time", "initial_query_start_time_microseconds", "interface", "os_user", "client_hostname", "client_name",
"client_revision", "client_version_major", "client_version_minor", "client_version_patch", "http_method", "http_user_agent",
"http_referer", "forwarded_for", "quota_key", "revision", "log_comment", "thread_ids", "ProfileEvents", "Settings",
"used_aggregate_functions", "used_aggregate_function_combinators", "used_database_engines", "used_data_type_families",
"used_dictionaries", "used_formats", "used_functions", "used_storages", "used_table_functions"}
queryLogFrame := test.NewFakeDataFrame("queryLog", queryLogColumns,
[][]interface{}{
{"QueryStart", "2021-12-13", "2021-12-13 12:53:20", "2021-12-13 12:53:20.590579", "2021-12-13 12:53:20", "2021-12-13 12:53:20.590579", "0", "0", "0", "0", "0", "0", "0", "0", "default", "SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)", "", "6666026786019643712", "Select", "['system']", "['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']", "['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']", "[]", "[]", "0", "", "", "1", "default", "3b5feb6d-3086-4718-adb2-17464988ff12", "::ffff:127.0.0.1", "50920", "default", "3b5feb6d-3086-4718-adb2-17464988ff12", "::ffff:127.0.0.1", "50920", "2021-12-13 12:53:30", "2021-12-13 12:53:30.590579", "1", "", "", "ClickHouse client", "54450", "21", "11", "0", "0", "", "", "", "", "54456", "", "[]", "{}", "{'load_balancing':'random','max_memory_usage':'10000000000'}", "[]", "[]", "[]", "[]", "[]", "[]", "[]", "[]", "[]"},
{"QueryFinish", "2021-12-13", "2021-12-13 12:53:30", "2021-12-13 12:53:30.607292", "2021-12-13 12:53:30", "2021-12-13 12:53:30.590579", "15", "4512", "255694", "0", "0", "4358", "173248", "4415230", "default", "SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)", "", "6666026786019643712", "Select", "['system']", "['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']", "['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']", "[]", "[]", "0", "", "", "1", "default", "3b5feb6d-3086-4718-adb2-17464988ff12", "::ffff:127.0.0.1", "50920", "default", "3b5feb6d-3086-4718-adb2-17464988ff12", "::ffff:127.0.0.1", "50920", "2021-12-13 12:53:30", "2021-12-13 12:53:30.590579", "1", "", "", "ClickHouse client", "54450", "21", "11", "0", "0", "", "", "", "", "54456", "", "[95298,95315,95587,95316,95312,95589,95318,95586,95588,95585]", "{'Query':1,'SelectQuery':1,'ArenaAllocChunks':41,'ArenaAllocBytes':401408,'FunctionExecute':62,'NetworkSendElapsedMicroseconds':463,'NetworkSendBytes':88452,'SelectedRows':4512,'SelectedBytes':255694,'RegexpCreated':6,'ContextLock':411,'RWLockAcquiredReadLocks':190,'RealTimeMicroseconds':49221,'UserTimeMicroseconds':19811,'SystemTimeMicroseconds':2817,'SoftPageFaults':1128,'OSCPUWaitMicroseconds':127,'OSCPUVirtualTimeMicroseconds':22624,'OSWriteBytes':12288,'OSWriteChars':13312}", "{'load_balancing':'random','max_memory_usage':'10000000000'}", "[]", "[]", "[]", "[]", "[]", "[]", "['concat','notEmpty','extractAll']", "[]", "[]"},
{"QueryStart", "2021-12-13", "2021-12-13 13:02:53", "2021-12-13 13:02:53.419528", "2021-12-13 13:02:53", "2021-12-13 13:02:53.419528", "0", "0", "0", "0", "0", "0", "0", "0", "default", "SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)", "", "6666026786019643712", "Select", "['system']", "['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']", "['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']", "[]", "[]", "0", "", "", "1", "default", "351b58e4-6128-47d4-a7b8-03d78c1f84c6", "::ffff:127.0.0.1", "50968", "default", "351b58e4-6128-47d4-a7b8-03d78c1f84c6", "::ffff:127.0.0.1", "50968", "2021-12-13 13:02:53", "2021-12-13 13:02:53.419528", "1", "", "", "ClickHouse client", "54450", "21", "11", "0", "0", "", "", "", "", "54456", "", "[]", "{}", "{'load_balancing':'random','max_memory_usage':'10000000000'}", "[]", "[]", "[]", "[]", "[]", "[]", "[]", "[]", "[]"},
{"QueryFinish", "2021-12-13", "2021-12-13 13:02:56", "2021-12-13 13:02:56.437115", "2021-12-13 13:02:56", "2021-12-13 13:02:56.419528", "16", "4629", "258376", "0", "0", "4377", "174272", "4404694", "default", "SELECT DISTINCT arrayJoin(extractAll(name, '[\\w_]{2,}')) AS res FROM (SELECT name FROM system.functions UNION ALL SELECT name FROM system.table_engines UNION ALL SELECT name FROM system.formats UNION ALL SELECT name FROM system.table_functions UNION ALL SELECT name FROM system.data_type_families UNION ALL SELECT name FROM system.merge_tree_settings UNION ALL SELECT name FROM system.settings UNION ALL SELECT cluster FROM system.clusters UNION ALL SELECT macro FROM system.macros UNION ALL SELECT policy_name FROM system.storage_policies UNION ALL SELECT concat(func.name, comb.name) FROM system.functions AS func CROSS JOIN system.aggregate_function_combinators AS comb WHERE is_aggregate UNION ALL SELECT name FROM system.databases LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.tables LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.dictionaries LIMIT 10000 UNION ALL SELECT DISTINCT name FROM system.columns LIMIT 10000) WHERE notEmpty(res)", "", "6666026786019643712", "Select", "['system']", "['system.aggregate_function_combinators','system.clusters','system.columns','system.data_type_families','system.databases','system.dictionaries','system.formats','system.functions','system.macros','system.merge_tree_settings','system.settings','system.storage_policies','system.table_engines','system.table_functions','system.tables']", "['system.aggregate_function_combinators.name','system.clusters.cluster','system.columns.name','system.data_type_families.name','system.databases.name','system.dictionaries.name','system.formats.name','system.functions.is_aggregate','system.functions.name','system.macros.macro','system.merge_tree_settings.name','system.settings.name','system.storage_policies.policy_name','system.table_engines.name','system.table_functions.name','system.tables.name']", "[]", "[]", "0", "", "", "1", "default", "351b58e4-6128-47d4-a7b8-03d78c1f84c6", "::ffff:127.0.0.1", "50968", "default", "351b58e4-6128-47d4-a7b8-03d78c1f84c6", "::ffff:127.0.0.1", "50968", "2021-12-13 13:02:53", "2021-12-13 13:02:53.419528", "1", "", "", "ClickHouse client", "54450", "21", "11", "0", "0", "", "", "", "", "54456", "", "[95298,95318,95315,95316,95312,95588,95589,95586,95585,95587]", "{'Query':1,'SelectQuery':1,'ArenaAllocChunks':41,'ArenaAllocBytes':401408,'FunctionExecute':62,'NetworkSendElapsedMicroseconds':740,'NetworkSendBytes':88794,'SelectedRows':4629,'SelectedBytes':258376,'ContextLock':411,'RWLockAcquiredReadLocks':194,'RealTimeMicroseconds':52469,'UserTimeMicroseconds':17179,'SystemTimeMicroseconds':4218,'SoftPageFaults':569,'OSCPUWaitMicroseconds':303,'OSCPUVirtualTimeMicroseconds':25087,'OSWriteBytes':12288,'OSWriteChars':12288}", "{'load_balancing':'random','max_memory_usage':'10000000000'}", "[]", "[]", "[]", "[]", "[]", "[]", "['concat','notEmpty','extractAll']", "[]", "[]"},
})
client.QueryResponses["SELECT * FROM system.query_log ORDER BY event_time_microseconds ASC LIMIT 100000"] = &queryLogFrame
textLogColumns := []string{"event_date", "event_time", "event_time_microseconds", "microseconds", "thread_name", "thread_id", "level", "query_id", "logger_name", "message", "revision", "source_file", "source_line"}
textLogFrame := test.NewFakeDataFrame("textLog", textLogColumns,
[][]interface{}{
{"2022-02-03", "2022-02-03 16:17:47", "2022-02-03 16:37:17.056950", "56950", "clickhouse-serv", "68947", "Information", "", "DNSCacheUpdater", "Update period 15 seconds", "54458", "../src/Interpreters/DNSCacheUpdater.cpp; void DB::DNSCacheUpdater::start()", "46"},
{"2022-02-03", "2022-02-03 16:27:47", "2022-02-03 16:37:27.057022", "57022", "clickhouse-serv", "68947", "Information", "", "Application", "Available RAM: 62.24 GiB; physical cores: 8; logical cores: 16.", "54458", "../programs/server/Server.cpp; virtual int DB::Server::main(const std::vector<std::string> &)", "1380"},
{"2022-02-03", "2022-02-03 16:37:47", "2022-02-03 16:37:37.057484", "57484", "clickhouse-serv", "68947", "Information", "", "Application", "Listening for http://[::1]:8123", "54458", "../programs/server/Server.cpp; virtual int DB::Server::main(const std::vector<std::string> &)", "1444"},
{"2022-02-03", "2022-02-03 16:47:47", "2022-02-03 16:37:47.057527", "57527", "clickhouse-serv", "68947", "Information", "", "Application", "Listening for native protocol (tcp): [::1]:9000", "54458", "../programs/server/Server.cpp; virtual int DB::Server::main(const std::vector<std::string> &)", "1444"},
})
client.QueryResponses["SELECT * FROM system.text_log ORDER BY event_time_microseconds ASC LIMIT 100000"] = &textLogFrame
// skip the query_thread_log frame - it often doesn't exist unless explicitly enabled
t.Run("test default db logs collection", func(t *testing.T) {
bundle, errs := dbLogsCollector.Collect(config.Configuration{})
require.Empty(t, errs)
require.NotNil(t, bundle)
require.Len(t, bundle.Frames, 2)
require.Contains(t, bundle.Frames, "text_log")
require.Contains(t, bundle.Frames, "query_log")
require.Len(t, bundle.Errors.Errors, 1)
// check query_log frame
require.Contains(t, bundle.Frames, "query_log")
require.Equal(t, queryLogColumns, bundle.Frames["query_log"].Columns())
checkFrame(t, bundle.Frames["query_log"], queryLogFrame.Rows)
// check text_log frame
require.Contains(t, bundle.Frames, "text_log")
require.Equal(t, textLogColumns, bundle.Frames["text_log"].Columns())
checkFrame(t, bundle.Frames["text_log"], textLogFrame.Rows)
client.Reset()
})
t.Run("test db logs collection with limit", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.IntParam{
Value: 1,
Param: config.NewParam("row_limit", "Maximum number of log rows to collect. Negative values mean unlimited", false),
},
},
}
bundle, err := dbLogsCollector.Collect(conf)
require.Empty(t, err)
require.NotNil(t, bundle)
require.Len(t, bundle.Frames, 0)
require.Len(t, bundle.Errors.Errors, 3)
// populate client
client.QueryResponses["SELECT * FROM system.query_log ORDER BY event_time_microseconds ASC LIMIT 1"] = &queryLogFrame
client.QueryResponses["SELECT * FROM system.text_log ORDER BY event_time_microseconds ASC LIMIT 1"] = &textLogFrame
bundle, err = dbLogsCollector.Collect(conf)
require.Empty(t, err)
require.Len(t, bundle.Frames, 2)
require.Len(t, bundle.Errors.Errors, 1)
require.Contains(t, bundle.Frames, "text_log")
require.Contains(t, bundle.Frames, "query_log")
// check query_log frame
require.Contains(t, bundle.Frames, "query_log")
require.Equal(t, queryLogColumns, bundle.Frames["query_log"].Columns())
checkFrame(t, bundle.Frames["query_log"], queryLogFrame.Rows[:1])
// check text_log frame
require.Contains(t, bundle.Frames, "text_log")
require.Equal(t, textLogColumns, bundle.Frames["text_log"].Columns())
checkFrame(t, bundle.Frames["text_log"], textLogFrame.Rows[:1])
client.Reset()
})
}
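
Worth noting about the limit test above: the fake client appears to key canned responses on the exact rendered SQL string, LIMIT clause included, which is why the first Collect with row_limit 1 returns no frames and three errors until responses keyed with LIMIT 1 are registered. A minimal sketch of that exact-match contract, assuming a plain map lookup (an illustration, not the fake client's real implementation):

responses := map[string]*test.FakeDataFrame{
"SELECT * FROM system.text_log ORDER BY event_time_microseconds ASC LIMIT 1": &textLogFrame,
}
sql := "SELECT * FROM system.text_log ORDER BY event_time_microseconds ASC LIMIT 1"
if frame, ok := responses[sql]; ok {
fmt.Println("served canned frame:", frame.Name()) // hit - the collector receives a frame
} else {
fmt.Println("no canned response - the collector records an error for this table")
}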

View File

@ -1,140 +0,0 @@
package clickhouse
import (
"fmt"
"path/filepath"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
)
// LogsCollector collects the ClickHouse server log files from the local filesystem
type LogsCollector struct {
resourceManager *platform.ResourceManager
}
func NewLogsCollector(m *platform.ResourceManager) *LogsCollector {
return &LogsCollector{
resourceManager: m,
}
}
var DefaultLogsLocation = filepath.Clean("/var/log/clickhouse-server/")
func (lc *LogsCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
conf, err := conf.ValidateConfig(lc.Configuration())
if err != nil {
return &data.DiagnosticBundle{}, err
}
directory, err := config.ReadStringValue(conf, "directory")
if err != nil {
return &data.DiagnosticBundle{}, err
}
collectArchives, err := config.ReadBoolValue(conf, "collect_archives")
if err != nil {
return &data.DiagnosticBundle{}, err
}
logPatterns := []string{"*.log"}
if collectArchives {
logPatterns = append(logPatterns, "*.gz")
}
if directory != "" {
// user has specified a directory - we therefore skip all other efforts to locate the logs
frame, errs := data.NewFileDirectoryFrame(directory, logPatterns)
return &data.DiagnosticBundle{
Frames: map[string]data.Frame{
"user_specified": frame,
},
Errors: data.FrameErrors{Errors: errs},
}, nil
}
// add the default
frames := make(map[string]data.Frame)
dirFrame, frameErrors := data.NewFileDirectoryFrame(DefaultLogsLocation, logPatterns)
frames["default"] = dirFrame
logFolders, errs := FindLogFileCandidates()
frameErrors = append(frameErrors, errs...)
i := 0
for folder, paths := range logFolders {
// we will collect the default location anyway above so skip these
if folder != DefaultLogsLocation {
if collectArchives {
paths = append(paths, "*.gz")
}
dirFrame, errs := data.NewFileDirectoryFrame(folder, paths)
frames[fmt.Sprintf("logs-%d", i)] = dirFrame
frameErrors = append(frameErrors, errs...)
i++
}
}
return &data.DiagnosticBundle{
Frames: frames,
Errors: data.FrameErrors{Errors: frameErrors},
}, nil
}
func (lc *LogsCollector) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "",
Param: config.NewParam("directory", "Specify the location of the log files for ClickHouse Server e.g. /var/log/clickhouse-server/", false),
AllowEmpty: true,
},
config.BoolParam{
Param: config.NewParam("collect_archives", "Collect compressed log archive files", false),
},
},
}
}
func FindLogFileCandidates() (logFolders map[string][]string, configErrors []error) {
// we need the config to determine the location of the logs
configCandidates := make(map[string]data.ConfigFileFrame)
configFiles, err := FindConfigurationFiles()
logFolders = make(map[string][]string)
if err != nil {
configErrors = append(configErrors, err)
return logFolders, configErrors
}
for _, folder := range configFiles {
configFrame, errs := data.NewConfigFileFrame(folder)
configErrors = append(configErrors, errs...)
configCandidates[filepath.Clean(folder)] = configFrame
}
for _, config := range configCandidates {
paths, errs := config.FindLogPaths()
for _, path := range paths {
folder := filepath.Dir(path)
filename := filepath.Base(path)
if _, ok := logFolders[folder]; !ok {
logFolders[folder] = []string{}
}
logFolders[folder] = utils.Unique(append(logFolders[folder], filename))
}
configErrors = append(configErrors, errs...)
}
return logFolders, configErrors
}
func (lc *LogsCollector) IsDefault() bool {
return true
}
func (lc LogsCollector) Description() string {
return "Collects the ClickHouse logs directly from the database."
}
// here we register the collector for use
func init() {
collectors.Register("logs", func() (collectors.Collector, error) {
return &LogsCollector{
resourceManager: platform.GetResourceManager(),
}, nil
})
}
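
For reference, a hypothetical invocation of the collector above with a user-specified directory. As the Collect implementation shows, supplying a directory skips log discovery entirely and yields a single frame keyed "user_specified"; the path and printing here are illustrative only:

collector := NewLogsCollector(platform.GetResourceManager())
bundle, err := collector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "/var/log/clickhouse-server/",
Param: config.NewParam("directory", "Log file location", false),
AllowEmpty: true,
},
},
})
if err == nil {
_, ok := bundle.Frames["user_specified"] // present whenever a directory is specified
fmt.Println("collected user-specified logs:", ok)
}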

View File

@ -1,147 +0,0 @@
package clickhouse_test
import (
"fmt"
"os"
"path"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/stretchr/testify/require"
)
func TestLogsConfiguration(t *testing.T) {
t.Run("correct configuration is returned for logs collector", func(t *testing.T) {
client := test.NewFakeClickhouseClient(make(map[string][]string))
logsCollector := clickhouse.NewLogsCollector(&platform.ResourceManager{
DbClient: client,
})
conf := logsCollector.Configuration()
require.Len(t, conf.Params, 2)
// check directory
require.IsType(t, config.StringParam{}, conf.Params[0])
directory, ok := conf.Params[0].(config.StringParam)
require.True(t, ok)
require.False(t, directory.Required())
require.Equal(t, directory.Name(), "directory")
require.Empty(t, directory.Value)
// check collect_archives
require.IsType(t, config.BoolParam{}, conf.Params[1])
collectArchives, ok := conf.Params[1].(config.BoolParam)
require.True(t, ok)
require.False(t, collectArchives.Required())
require.Equal(t, collectArchives.Name(), "collect_archives")
require.False(t, collectArchives.Value)
})
}
func TestLogsCollect(t *testing.T) {
logsCollector := clickhouse.NewLogsCollector(&platform.ResourceManager{})
t.Run("test default logs collection", func(t *testing.T) {
// we can't rely on a local installation of clickhouse being present for tests - if it is present (and running)
// results may be variable e.g. we may find a config. For now, we allow flexibility and test only default.
// TODO: we may want to test this within a container
bundle, err := logsCollector.Collect(config.Configuration{})
require.Nil(t, err)
require.NotNil(t, bundle)
// we will have some errors whether clickhouse is installed or not: permission issues if it is, missing folders if not.
require.Greater(t, len(bundle.Errors.Errors), 0)
require.Len(t, bundle.Frames, 1)
require.Contains(t, bundle.Frames, "default")
_, ok := bundle.Frames["default"].(data.DirectoryFileFrame)
require.True(t, ok)
// no guarantees clickhouse is installed so this bundle could have no frames
})
t.Run("test logs collection when directory is specified", func(t *testing.T) {
cwd, err := os.Getwd()
require.Nil(t, err)
logsPath := path.Join(cwd, "../../../testdata", "logs", "var", "logs")
bundle, err := logsCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: logsPath,
Param: config.NewParam("directory", "Specify the location of the log files for ClickHouse Server e.g. /var/log/clickhouse-server/", false),
AllowEmpty: true,
},
},
})
require.Nil(t, err)
checkDirectoryBundle(t, bundle, logsPath, []string{"clickhouse-server.log", "clickhouse-server.err.log"})
})
t.Run("test logs collection of archives", func(t *testing.T) {
cwd, err := os.Getwd()
require.Nil(t, err)
logsPath := path.Join(cwd, "../../../testdata", "logs", "var", "logs")
bundle, err := logsCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: logsPath,
Param: config.NewParam("directory", "Specify the location of the log files for ClickHouse Server e.g. /var/log/clickhouse-server/", false),
AllowEmpty: true,
},
config.BoolParam{
Value: true,
Param: config.NewParam("collect_archives", "Collect compressed log archive files", false),
},
},
})
require.Nil(t, err)
checkDirectoryBundle(t, bundle, logsPath, []string{"clickhouse-server.log", "clickhouse-server.err.log", "clickhouse-server.log.gz"})
})
t.Run("test when directory does not exist", func(t *testing.T) {
tmpDir := t.TempDir()
logsPath := path.Join(tmpDir, "random")
bundle, err := logsCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: logsPath,
Param: config.NewParam("directory", "Specify the location of the log files for ClickHouse Server e.g. /var/log/clickhouse-server/", false),
AllowEmpty: true,
},
},
})
// not a fatal error currently
require.Nil(t, err)
require.Len(t, bundle.Errors.Errors, 1)
require.Equal(t, fmt.Sprintf("directory %s does not exist", logsPath), bundle.Errors.Errors[0].Error())
})
}
func checkDirectoryBundle(t *testing.T, bundle *data.DiagnosticBundle, logsPath string, expectedFiles []string) {
require.NotNil(t, bundle)
require.Nil(t, bundle.Errors.Errors)
require.Len(t, bundle.Frames, 1)
require.Contains(t, bundle.Frames, "user_specified")
dirFrame, ok := bundle.Frames["user_specified"].(data.DirectoryFileFrame)
require.True(t, ok)
require.Equal(t, logsPath, dirFrame.Directory)
require.Equal(t, []string{"files"}, dirFrame.Columns())
i := 0
fullPaths := make([]string, len(expectedFiles))
for j, filePath := range expectedFiles {
fullPaths[j] = path.Join(logsPath, filePath)
}
for {
values, ok, err := dirFrame.Next()
require.Nil(t, err)
if !ok {
break
}
require.Len(t, values, 1)
file, ok := values[0].(data.SimpleFile)
require.True(t, ok)
require.Contains(t, fullPaths, file.FilePath())
i += 1
}
require.Equal(t, len(fullPaths), i)
}

View File

@ -1,153 +0,0 @@
{
"queries": {
"version": [
{
"statement": "SELECT version()"
}
],
"databases": [
{
"statement": "SELECT name, engine, tables, partitions, parts, formatReadableSize(bytes_on_disk) \"disk_size\" FROM system.databases db LEFT JOIN ( SELECT database, uniq(table) \"tables\", uniq(table, partition) \"partitions\", count() AS parts, sum(bytes_on_disk) \"bytes_on_disk\" FROM system.parts WHERE active GROUP BY database ) AS db_stats ON db.name = db_stats.database ORDER BY bytes_on_disk DESC LIMIT {{.Limit}}"
}
],
"access": [
{
"statement": "SHOW ACCESS"
}
],
"quotas": [
{
"statement": "SHOW QUOTA"
}
],
"db_engines": [
{
"statement": "SELECT engine, count() \"count\" FROM system.databases GROUP BY engine"
}
],
"table_engines": [
{
"statement": "SELECT engine, count() \"count\" FROM system.tables WHERE database != 'system' GROUP BY engine"
}
],
"dictionaries": [
{
"statement": "SELECT source, type, status, count() \"count\" FROM system.dictionaries GROUP BY source, type, status ORDER BY status DESC, source"
}
],
"replicated_tables_by_delay": [
{
"statement": "SELECT database, table, is_leader, is_readonly, absolute_delay, queue_size, inserts_in_queue, merges_in_queue FROM system.replicas ORDER BY absolute_delay DESC LIMIT {{.Limit}}"
}
],
"replication_queue_by_oldest": [
{
"statement": "SELECT database, table, replica_name, position, node_name, type, source_replica, parts_to_merge, new_part_name, create_time, required_quorum, is_detach, is_currently_executing, num_tries, last_attempt_time, last_exception, concat( 'time: ', toString(last_postpone_time), ', number: ', toString(num_postponed), ', reason: ', postpone_reason ) postpone FROM system.replication_queue ORDER BY create_time ASC LIMIT {{.Limit}}"
}
],
"replicated_fetches": [
{
"statement": "SELECT database, table, round(elapsed, 1) \"elapsed\", round(100 * progress, 1) \"progress\", partition_id, result_part_name, result_part_path, total_size_bytes_compressed, bytes_read_compressed, source_replica_path, source_replica_hostname, source_replica_port, interserver_scheme, to_detached, thread_id FROM system.replicated_fetches"
}
],
"tables_by_max_partition_count": [
{
"statement": "SELECT database, table, count() \"partitions\", sum(part_count) \"parts\", max(part_count) \"max_parts_per_partition\" FROM ( SELECT database, table, partition, count() \"part_count\" FROM system.parts WHERE active GROUP BY database, table, partition ) partitions GROUP BY database, table ORDER BY max_parts_per_partition DESC LIMIT {{.Limit}}"
}
],
"stack_traces": [
{
"statement": "SELECT '\\n' || arrayStringConcat( arrayMap( x, y -> concat(x, ': ', y), arrayMap(x -> addressToLine(x), trace), arrayMap(x -> demangle(addressToSymbol(x)), trace) ), '\\n' ) AS trace FROM system.stack_trace"
}
],
"crash_log": [
{
"statement": "SELECT event_time, signal, thread_id, query_id, '\\n' || arrayStringConcat(trace_full, '\\n') AS trace, version FROM system.crash_log ORDER BY event_time DESC"
}
],
"merges": [
{
"statement": "SELECT database, table, round(elapsed, 1) \"elapsed\", round(100 * progress, 1) \"progress\", is_mutation, partition_id, result_part_path, source_part_paths, num_parts, formatReadableSize(total_size_bytes_compressed) \"total_size_compressed\", formatReadableSize(bytes_read_uncompressed) \"read_uncompressed\", formatReadableSize(bytes_written_uncompressed) \"written_uncompressed\", columns_written, formatReadableSize(memory_usage) \"memory_usage\", thread_id FROM system.merges",
"constraint": ">=20.3"
},
{
"statement": "SELECT database, table, round(elapsed, 1) \"elapsed\", round(100 * progress, 1) \"progress\", is_mutation, partition_id, num_parts, formatReadableSize(total_size_bytes_compressed) \"total_size_compressed\", formatReadableSize(bytes_read_uncompressed) \"read_uncompressed\", formatReadableSize(bytes_written_uncompressed) \"written_uncompressed\", columns_written, formatReadableSize(memory_usage) \"memory_usage\" FROM system.merges"
}
],
"mutations": [
{
"statement": "SELECT database, table, mutation_id, command, create_time, parts_to_do_names, parts_to_do, is_done, latest_failed_part, latest_fail_time, latest_fail_reason FROM system.mutations WHERE NOT is_done ORDER BY create_time DESC",
"constraint": ">=20.3"
},
{
"statement": "SELECT database, table, mutation_id, command, create_time, parts_to_do, is_done, latest_failed_part, latest_fail_time, latest_fail_reason FROM system.mutations WHERE NOT is_done ORDER BY create_time DESC"
}
],
"recent_data_parts": [
{
"statement": "SELECT database, table, engine, partition_id, name, part_type, active, level, disk_name, path, marks, rows, bytes_on_disk, data_compressed_bytes, data_uncompressed_bytes, marks_bytes, modification_time, remove_time, refcount, is_frozen, min_date, max_date, min_time, max_time, min_block_number, max_block_number FROM system.parts WHERE modification_time > now() - INTERVAL 3 MINUTE ORDER BY modification_time DESC",
"constraint": ">=20.3"
},
{
"statement": "SELECT database, table, engine, partition_id, name, active, level, path, marks, rows, bytes_on_disk, data_compressed_bytes, data_uncompressed_bytes, marks_bytes, modification_time, remove_time, refcount, is_frozen, min_date, max_date, min_time, max_time, min_block_number, max_block_number FROM system.parts WHERE modification_time > now() - INTERVAL 3 MINUTE ORDER BY modification_time DESC"
}
],
"detached_parts": [
{
"statement": "SELECT database, table, partition_id, name, disk, reason, min_block_number, max_block_number, level FROM system.detached_parts"
}
],
"processes": [
{
"statement": "SELECT elapsed, query_id, normalizeQuery(query) AS normalized_query, is_cancelled, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, formatReadableSize(memory_usage) AS \"memory usage\", user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, thread_ids, ProfileEvents, Settings FROM system.processes ORDER BY elapsed DESC",
"constraint": ">=21.8"
},
{
"statement": "SELECT elapsed, query_id, normalizeQuery(query) AS normalized_query, is_cancelled, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, formatReadableSize(memory_usage) AS \"memory usage\", user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, thread_ids, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.processes ORDER BY elapsed DESC",
"constraint": ">=21.3"
},
{
"statement": "SELECT elapsed, query_id, normalizeQuery(query) AS normalized_query, is_cancelled, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, formatReadableSize(memory_usage) AS \"memory usage\", user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.processes ORDER BY elapsed DESC"
}
],
"top_queries_by_duration": [
{
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents, Settings FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY query_duration_ms DESC LIMIT {{.Limit}}",
"constraint": ">=21.8"
},
{
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY query_duration_ms DESC LIMIT {{.Limit}}",
"constraint": ">=21.3"
},
{
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY query_duration_ms DESC LIMIT {{.Limit}}"
}
],
"top_queries_by_memory": [
{
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents, Settings FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY memory_usage DESC LIMIT {{.Limit}}",
"constraint": ">=21.8"
},
{
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY memory_usage DESC LIMIT {{.Limit}}",
"constraint": ">=21.3"
},
{
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY ORDER BY memory_usage DESC LIMIT {{.Limit}}"
}
],
"failed_queries": [
{
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents, Settings FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY AND exception != '' ORDER BY query_start_time DESC LIMIT {{.Limit}}",
"constraint": ">=21.8"
},
{
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, databases, tables, columns, used_aggregate_functions, used_aggregate_function_combinators, used_database_engines, used_data_type_families, used_dictionaries, used_formats, used_functions, used_storages, used_table_functions, thread_ids, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY AND exception != '' ORDER BY query_start_time DESC LIMIT {{.Limit}}",
"constraint": ">=21.3"
},
{
"statement": "SELECT type, query_start_time, query_duration_ms, query_id, query_kind, is_initial_query, normalizeQuery(query) AS normalized_query, concat( toString(read_rows), ' rows / ', formatReadableSize(read_bytes) ) AS read, concat( toString(written_rows), ' rows / ', formatReadableSize(written_bytes) ) AS written, concat( toString(result_rows), ' rows / ', formatReadableSize(result_bytes) ) AS result, formatReadableSize(memory_usage) AS \"memory usage\", exception, '\\n' || stack_trace AS stack_trace, user, initial_user, multiIf( empty(client_name), http_user_agent, concat( client_name, ' ', toString(client_version_major), '.', toString(client_version_minor), '.', toString(client_version_patch) ) ) AS client, client_hostname, ProfileEvents.Names, ProfileEvents.Values, Settings.Names, Settings.Values FROM system.query_log WHERE type != 'QueryStart' AND event_date >= today() - 1 AND event_time >= now() - INTERVAL 1 DAY AND exception != '' ORDER BY query_start_time DESC LIMIT {{.Limit}}"
}
]
}
}
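
The variants for each query above are tried in order and the first whose "constraint" matches the server version is executed, so variants are listed newest-first with an unconstrained fallback last. A sketch of how a single constraint is evaluated with the semver package used by the collector that follows ("21.9.4" is an illustrative server version):

c, _ := semver.NewConstraint(">=21.8") // error handling elided for brevity
v := semver.MustParse("21.9.4")
fmt.Println(c.Check(v)) // true - the >=21.8 variant of the query is selected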

View File

@ -1,159 +0,0 @@
package clickhouse
import (
"bytes"
_ "embed"
"encoding/json"
"strings"
"text/template"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/Masterminds/semver"
"github.com/pkg/errors"
)
// SummaryCollector collects summary statistics on the database using a set of known useful queries
type SummaryCollector struct {
resourceManager *platform.ResourceManager
}
type querySet struct {
Queries map[string][]query `json:"queries"`
}
type query struct {
Statement string `json:"statement"`
Constraint string `json:"constraint"`
}
type ParameterTemplate struct {
Limit int64
}
//go:embed queries.json
var queryFile []byte
func NewSummaryCollector(m *platform.ResourceManager) *SummaryCollector {
return &SummaryCollector{
resourceManager: m,
}
}
func (sc *SummaryCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
conf, err := conf.ValidateConfig(sc.Configuration())
if err != nil {
return &data.DiagnosticBundle{}, err
}
var queries querySet
err = json.Unmarshal(queryFile, &queries)
if err != nil {
return &data.DiagnosticBundle{}, errors.Wrap(err, "Unable to read queries from disk")
}
limit, err := config.ReadIntValue(conf, "row_limit")
if err != nil {
return &data.DiagnosticBundle{}, err
}
paramTemplate := ParameterTemplate{
Limit: limit,
}
frames := make(map[string]data.Frame)
serverVersion, err := getServerSemVersion(sc)
if err != nil {
return &data.DiagnosticBundle{}, errors.Wrapf(err, "Unable to read server version")
}
var frameErrors []error
for queryId, sqlQueries := range queries.Queries {
// we find the first matching query that satisfies the current version. Empty version means ANY version is
// supported
for _, sqlQuery := range sqlQueries {
var queryConstraint *semver.Constraints
if sqlQuery.Constraint != "" {
queryConstraint, err = semver.NewConstraint(sqlQuery.Constraint)
if err != nil {
// we try another one
frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to parse version %s for query %s", sqlQuery.Constraint, queryId))
continue
}
}
if sqlQuery.Constraint == "" || queryConstraint.Check(serverVersion) {
tmpl, err := template.New(queryId).Parse(sqlQuery.Statement)
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to parse query %s", queryId))
// we try another one
continue
}
buf := new(bytes.Buffer)
err = tmpl.Execute(buf, paramTemplate)
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to process query %s template", queryId))
// we try another one
continue
}
frame, err := sc.resourceManager.DbClient.ExecuteStatement(queryId, buf.String())
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to execute query %s", queryId))
// we try another one
} else {
frames[queryId] = frame
// only 1 query executed
break
}
}
}
}
fErrors := data.FrameErrors{
Errors: frameErrors,
}
return &data.DiagnosticBundle{
Frames: frames,
Errors: fErrors,
}, nil
}
func getServerSemVersion(sc *SummaryCollector) (*semver.Version, error) {
serverVersion, err := sc.resourceManager.DbClient.Version()
if err != nil {
return &semver.Version{}, err
}
// drop the build number - it is not a semantic version
versionComponents := strings.Split(serverVersion, ".")
serverVersion = strings.Join(versionComponents[:len(versionComponents)-1], ".")
return semver.NewVersion(serverVersion)
}
func (sc *SummaryCollector) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.IntParam{
Value: 20,
Param: config.NewParam("row_limit", "Limit rows on supported queries.", false),
},
},
}
}
func (sc *SummaryCollector) IsDefault() bool {
return true
}
func (sc *SummaryCollector) Description() string {
return "Collects summary statistics on the database based on a set of known useful queries."
}
// here we register the collector for use
func init() {
collectors.Register("summary", func() (collectors.Collector, error) {
return &SummaryCollector{
resourceManager: platform.GetResourceManager(),
}, nil
})
}
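
A worked example of the build-number trimming in getServerSemVersion above: ClickHouse reports a four-component version whose last component is not semver-compatible, so it is dropped before parsing ("22.1.3.7" is the version the tests below use):

serverVersion := "22.1.3.7"
versionComponents := strings.Split(serverVersion, ".")
trimmed := strings.Join(versionComponents[:len(versionComponents)-1], ".") // "22.1.3"
v, err := semver.NewVersion(trimmed)
fmt.Println(v, err) // 22.1.3 <nil>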

View File

@ -1,111 +0,0 @@
package clickhouse_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/stretchr/testify/require"
)
func TestSummaryConfiguration(t *testing.T) {
t.Run("correct configuration is returned for summary collector", func(t *testing.T) {
client := test.NewFakeClickhouseClient(make(map[string][]string))
summaryCollector := clickhouse.NewSummaryCollector(&platform.ResourceManager{
DbClient: client,
})
conf := summaryCollector.Configuration()
require.Len(t, conf.Params, 1)
require.IsType(t, config.IntParam{}, conf.Params[0])
limit, ok := conf.Params[0].(config.IntParam)
require.True(t, ok)
require.False(t, limit.Required())
require.Equal(t, limit.Name(), "row_limit")
require.Equal(t, int64(20), limit.Value)
})
}
func TestSummaryCollection(t *testing.T) {
client := test.NewFakeClickhouseClient(make(map[string][]string))
versionFrame := test.NewFakeDataFrame("version", []string{"version()"},
[][]interface{}{
{"22.1.3.7"},
},
)
client.QueryResponses["SELECT version()"] = &versionFrame
databasesFrame := test.NewFakeDataFrame("databases", []string{"name", "engine", "tables", "partitions", "parts", "disk_size"},
[][]interface{}{
{"tutorial", "Atomic", 2, 2, 2, "1.70 GiB"},
{"default", "Atomic", 5, 5, 6, "1.08 GiB"},
{"system", "Atomic", 11, 24, 70, "1.05 GiB"},
{"INFORMATION_SCHEMA", "Memory", 0, 0, 0, "0.00 B"},
{"covid19db", "Atomic", 0, 0, 0, "0.00 B"},
{"information_schema", "Memory", 0, 0, 0, "0.00 B"}})
client.QueryResponses["SELECT name, engine, tables, partitions, parts, formatReadableSize(bytes_on_disk) \"disk_size\" "+
"FROM system.databases db LEFT JOIN ( SELECT database, uniq(table) \"tables\", uniq(table, partition) \"partitions\", "+
"count() AS parts, sum(bytes_on_disk) \"bytes_on_disk\" FROM system.parts WHERE active GROUP BY database ) AS db_stats "+
"ON db.name = db_stats.database ORDER BY bytes_on_disk DESC LIMIT 20"] = &databasesFrame
summaryCollector := clickhouse.NewSummaryCollector(&platform.ResourceManager{
DbClient: client,
})
t.Run("test default summary collection", func(t *testing.T) {
bundle, errs := summaryCollector.Collect(config.Configuration{})
require.Empty(t, errs)
require.Len(t, bundle.Errors.Errors, 30)
require.NotNil(t, bundle)
require.Len(t, bundle.Frames, 2)
// check version frame
require.Contains(t, bundle.Frames, "version")
require.Equal(t, []string{"version()"}, bundle.Frames["version"].Columns())
checkFrame(t, bundle.Frames["version"], versionFrame.Rows)
// check databases frame
require.Contains(t, bundle.Frames, "databases")
require.Equal(t, []string{"name", "engine", "tables", "partitions", "parts", "disk_size"}, bundle.Frames["databases"].Columns())
checkFrame(t, bundle.Frames["databases"], databasesFrame.Rows)
client.Reset()
})
t.Run("test summary collection with limit", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.IntParam{
Value: 1,
Param: config.NewParam("row_limit", "Limit rows on supported queries.", false),
},
},
}
bundle, errs := summaryCollector.Collect(conf)
require.Empty(t, errs)
require.Len(t, bundle.Errors.Errors, 31)
require.NotNil(t, bundle)
// databases will be absent due to limit
require.Len(t, bundle.Frames, 1)
// check version frame
require.Contains(t, bundle.Frames, "version")
require.Equal(t, []string{"version()"}, bundle.Frames["version"].Columns())
checkFrame(t, bundle.Frames["version"], versionFrame.Rows)
client.QueryResponses["SELECT name, engine, tables, partitions, parts, formatReadableSize(bytes_on_disk) \"disk_size\" "+
"FROM system.databases db LEFT JOIN ( SELECT database, uniq(table) \"tables\", uniq(table, partition) \"partitions\", "+
"count() AS parts, sum(bytes_on_disk) \"bytes_on_disk\" FROM system.parts WHERE active GROUP BY database ) AS db_stats "+
"ON db.name = db_stats.database ORDER BY bytes_on_disk DESC LIMIT 1"] = &databasesFrame
bundle, errs = summaryCollector.Collect(conf)
require.Empty(t, errs)
require.Len(t, bundle.Errors.Errors, 30)
require.NotNil(t, bundle)
require.Len(t, bundle.Frames, 2)
require.Contains(t, bundle.Frames, "version")
// check databases frame
require.Contains(t, bundle.Frames, "databases")
require.Equal(t, []string{"name", "engine", "tables", "partitions", "parts", "disk_size"}, bundle.Frames["databases"].Columns())
// this will pass as our mock client does not read the statement (specifically the limit clause) when called with execute
checkFrame(t, bundle.Frames["databases"], databasesFrame.Rows)
})
}

View File

@ -1,165 +0,0 @@
package clickhouse
import (
"fmt"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/pkg/errors"
)
// SystemDatabaseCollector collects the tables of the system database
type SystemDatabaseCollector struct {
resourceManager *platform.ResourceManager
}
const SystemDatabase = "system"
// ExcludeColumns maps a table name to the columns to exclude from its dump, i.e. [table_name][columnA, columnB] - this will be refined over time
var ExcludeColumns = map[string][]string{}
// BannedTables - hardcoded list. These are always excluded even if the user doesn't specify them in exclude_tables.
// Attempts to export them will work but we will warn
var BannedTables = []string{"numbers", "zeros"}
// OrderBy contains a map of tables to an order by clause - by default we don't order table dumps
var OrderBy = map[string]data.OrderBy{
"errors": {
Column: "last_error_message",
Order: data.Desc,
},
"replication_queue": {
Column: "create_time",
Order: data.Asc,
},
}
func NewSystemDatabaseCollector(m *platform.ResourceManager) *SystemDatabaseCollector {
return &SystemDatabaseCollector{
resourceManager: m,
}
}
func (sc *SystemDatabaseCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
conf, err := conf.ValidateConfig(sc.Configuration())
if err != nil {
return &data.DiagnosticBundle{}, err
}
includeTables, err := config.ReadStringListValues(conf, "include_tables")
if err != nil {
return &data.DiagnosticBundle{}, err
}
excludeTables, err := config.ReadStringListValues(conf, "exclude_tables")
if err != nil {
return &data.DiagnosticBundle{}, err
}
rowLimit, err := config.ReadIntValue(conf, "row_limit")
if err != nil {
return &data.DiagnosticBundle{}, err
}
excludeTables = checkBannedTables(includeTables, excludeTables)
ds, err := sc.readSystemAllTables(includeTables, excludeTables, rowLimit)
if err != nil {
return &data.DiagnosticBundle{}, err
}
return ds, nil
}
// all banned tables are added to excluded if not present and not specified in included. Returns new exclude_tables list.
func checkBannedTables(includeTables []string, excludeTables []string) []string {
for _, bannedTable := range BannedTables {
// if it's explicitly included we don't add it to our exclude list - included tables take precedence
if !utils.Contains(includeTables, bannedTable) && !utils.Contains(excludeTables, bannedTable) {
excludeTables = append(excludeTables, bannedTable)
}
}
return excludeTables
}
func (sc *SystemDatabaseCollector) readSystemAllTables(include []string, exclude []string, limit int64) (*data.DiagnosticBundle, error) {
tableNames, err := sc.resourceManager.DbClient.ReadTableNamesForDatabase(SystemDatabase)
if err != nil {
return nil, err
}
var frameErrors []error
if include != nil {
// nil means include everything
tableNames = utils.Intersection(tableNames, include)
if len(tableNames) != len(include) {
// we warn that some included tables aren't present in db
frameErrors = append(frameErrors, fmt.Errorf("some tables specified in the include_tables are not in the system database and will not be exported: %v",
utils.Distinct(include, tableNames)))
}
}
// exclude tables unless specified in includes
excludedTables := utils.Distinct(exclude, include)
tableNames = utils.Distinct(tableNames, excludedTables)
frames := make(map[string]data.Frame)
for _, tableName := range tableNames {
var excludeColumns []string
if _, ok := ExcludeColumns[tableName]; ok {
excludeColumns = ExcludeColumns[tableName]
}
orderBy := data.OrderBy{}
if _, ok := OrderBy[tableName]; ok {
orderBy = OrderBy[tableName]
}
frame, err := sc.resourceManager.DbClient.ReadTable(SystemDatabase, tableName, excludeColumns, orderBy, limit)
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to collect %s", tableName))
} else {
frames[tableName] = frame
}
}
fErrors := data.FrameErrors{
Errors: frameErrors,
}
return &data.DiagnosticBundle{
Frames: frames,
Errors: fErrors,
}, nil
}
func (sc *SystemDatabaseCollector) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
// nil means include everything
Values: nil,
Param: config.NewParam("include_tables", "Specify list of tables to collect. Takes precedence over exclude_tables. If not specified (default) all tables except exclude_tables.", false),
},
config.StringListParam{
Values: []string{"licenses", "distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper", "aggregate_function_combinators", "collations", "contributors", "data_type_families", "formats", "graphite_retentions", "numbers", "numbers_mt", "one", "parts_columns", "projection_parts", "projection_parts_columns", "table_engines", "time_zones", "zeros", "zeros_mt"},
Param: config.NewParam("exclude_tables", "Specify list of tables to not collect.", false),
},
config.IntParam{
Value: 100000,
Param: config.NewParam("row_limit", "Maximum number of rows to collect from any table. Negative values mean unlimited.", false),
},
},
}
}
func (sc *SystemDatabaseCollector) IsDefault() bool {
return true
}
func (sc *SystemDatabaseCollector) Description() string {
return "Collects all tables in the system database, except those which have been excluded."
}
// here we register the collector for use
func init() {
collectors.Register("system_db", func() (collectors.Collector, error) {
return &SystemDatabaseCollector{
resourceManager: platform.GetResourceManager(),
}, nil
})
}
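
A worked example of the checkBannedTables precedence above, using the default BannedTables of "numbers" and "zeros": explicitly included tables are spared, while any other banned table is appended to the exclude list:

include := []string{"numbers"} // explicitly included, so not re-banned
exclude := []string{"query_log"}
exclude = checkBannedTables(include, exclude)
fmt.Println(exclude) // [query_log zeros]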

View File

@ -1,366 +0,0 @@
package clickhouse_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/stretchr/testify/require"
)
func TestSystemConfiguration(t *testing.T) {
t.Run("correct configuration is returned for system db collector", func(t *testing.T) {
client := test.NewFakeClickhouseClient(make(map[string][]string))
systemDbCollector := clickhouse.NewSystemDatabaseCollector(&platform.ResourceManager{
DbClient: client,
})
conf := systemDbCollector.Configuration()
require.Len(t, conf.Params, 3)
// check first param
require.IsType(t, config.StringListParam{}, conf.Params[0])
includeTables, ok := conf.Params[0].(config.StringListParam)
require.True(t, ok)
require.False(t, includeTables.Required())
require.Equal(t, includeTables.Name(), "include_tables")
require.Nil(t, includeTables.Values)
// check second param
require.IsType(t, config.StringListParam{}, conf.Params[1])
excludeTables, ok := conf.Params[1].(config.StringListParam)
require.True(t, ok)
require.False(t, excludeTables.Required())
require.Equal(t, "exclude_tables", excludeTables.Name())
require.Equal(t, []string{"licenses", "distributed_ddl_queue", "query_thread_log", "query_log", "asynchronous_metric_log", "zookeeper", "aggregate_function_combinators", "collations", "contributors", "data_type_families", "formats", "graphite_retentions", "numbers", "numbers_mt", "one", "parts_columns", "projection_parts", "projection_parts_columns", "table_engines", "time_zones", "zeros", "zeros_mt"}, excludeTables.Values)
// check third param
require.IsType(t, config.IntParam{}, conf.Params[2])
rowLimit, ok := conf.Params[2].(config.IntParam)
require.True(t, ok)
require.False(t, rowLimit.Required())
require.Equal(t, "row_limit", rowLimit.Name())
require.Equal(t, int64(100000), rowLimit.Value)
})
}
func TestSystemDbCollect(t *testing.T) {
diskFrame := test.NewFakeDataFrame("disks", []string{"name", "path", "free_space", "total_space", "keep_free_space", "type"},
[][]interface{}{
{"default", "/var/lib/clickhouse", 1729659346944, 1938213220352, "", "local"},
},
)
clusterFrame := test.NewFakeDataFrame("clusters", []string{"cluster", "shard_num", "shard_weight", "replica_num", "host_name", "host_address", "port", "is_local", "user", "default_database", "errors_count", "slowdowns_count", "estimated_recovery_time"},
[][]interface{}{
{"events", 1, 1, 1, "dalem-local-clickhouse-blue-1", "192.168.144.2", 9000, 1, "default", "", 0, 0, 0},
{"events", 2, 1, 1, "dalem-local-clickhouse-blue-2", "192.168.144.4", 9000, 1, "default", "", 0, 0, 0},
{"events", 3, 1, 1, "dalem-local-clickhouse-blue-3", "192.168.144.3", 9000, 1, "default", "", 0, 0, 0},
},
)
userFrame := test.NewFakeDataFrame("users", []string{"name", "id", "storage", "auth_type", "auth_params", "host_ip", "host_names", "host_names_regexp", "host_names_like"},
[][]interface{}{
{"default", "94309d50-4f52-5250-31bd-74fecac179db,users.xml,plaintext_password", "sha256_password", []string{"::0"}, []string{}, []string{}, []string{}},
},
)
dbTables := map[string][]string{
clickhouse.SystemDatabase: {"disks", "clusters", "users"},
}
client := test.NewFakeClickhouseClient(dbTables)
client.QueryResponses["SELECT * FROM system.disks LIMIT 100000"] = &diskFrame
client.QueryResponses["SELECT * FROM system.clusters LIMIT 100000"] = &clusterFrame
client.QueryResponses["SELECT * FROM system.users LIMIT 100000"] = &userFrame
systemDbCollector := clickhouse.NewSystemDatabaseCollector(&platform.ResourceManager{
DbClient: client,
})
t.Run("test default system db collection", func(t *testing.T) {
diagSet, err := systemDbCollector.Collect(config.Configuration{})
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 3)
// disks frame
require.Equal(t, "disks", diagSet.Frames["disks"].Name())
require.Equal(t, diskFrame.ColumnNames, diagSet.Frames["disks"].Columns())
checkFrame(t, diagSet.Frames["disks"], diskFrame.Rows)
// clusters frame
require.Equal(t, "clusters", diagSet.Frames["clusters"].Name())
require.Equal(t, clusterFrame.ColumnNames, diagSet.Frames["clusters"].Columns())
checkFrame(t, diagSet.Frames["clusters"], clusterFrame.Rows)
// users frame
require.Equal(t, "users", diagSet.Frames["users"].Name())
require.Equal(t, userFrame.ColumnNames, diagSet.Frames["users"].Columns())
checkFrame(t, diagSet.Frames["users"], userFrame.Rows)
client.Reset()
})
t.Run("test when we pass an includes", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
// nil means include everything
Values: []string{"disks"},
Param: config.NewParam("include_tables", "Exclusion", false),
},
},
}
diagSet, err := systemDbCollector.Collect(conf)
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 1)
// disks frame
require.Equal(t, "disks", diagSet.Frames["disks"].Name())
require.Equal(t, diskFrame.ColumnNames, diagSet.Frames["disks"].Columns())
checkFrame(t, diagSet.Frames["disks"], diskFrame.Rows)
client.Reset()
})
// test excludes
t.Run("test when we pass an excludes", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
Values: []string{"disks"},
Param: config.NewParam("exclude_tables", "Exclusion", false),
},
},
}
diagSet, err := systemDbCollector.Collect(conf)
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 2)
// clusters frame
require.Equal(t, "clusters", diagSet.Frames["clusters"].Name())
require.Equal(t, clusterFrame.ColumnNames, diagSet.Frames["clusters"].Columns())
checkFrame(t, diagSet.Frames["clusters"], clusterFrame.Rows)
// users frame
require.Equal(t, "users", diagSet.Frames["users"].Name())
require.Equal(t, userFrame.ColumnNames, diagSet.Frames["users"].Columns())
checkFrame(t, diagSet.Frames["users"], userFrame.Rows)
client.Reset()
})
// test includes which isn't in the list
t.Run("test when we pass an invalid includes", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
// nil means include everything
Values: []string{"disks", "invalid"},
Param: config.NewParam("include_tables", "Exclusion", false),
},
},
}
diagSet, err := systemDbCollector.Collect(conf)
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 1)
require.Equal(t, diagSet.Errors.Error(), "some tables specified in the include_tables are not in the "+
"system database and will not be exported: [invalid]")
require.Len(t, diagSet.Frames, 1)
// disks frame
require.Equal(t, "disks", diagSet.Frames["disks"].Name())
require.Equal(t, diskFrame.ColumnNames, diagSet.Frames["disks"].Columns())
checkFrame(t, diagSet.Frames["disks"], diskFrame.Rows)
client.Reset()
})
t.Run("test when we use a table with excluded fields", func(t *testing.T) {
excludeDefault := clickhouse.ExcludeColumns
client.QueryResponses["SELECT * EXCEPT(keep_free_space,type) FROM system.disks LIMIT 100000"] = &diskFrame
clickhouse.ExcludeColumns = map[string][]string{
"disks": {"keep_free_space", "type"},
}
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
// nil means include everything
Values: []string{"disks"},
Param: config.NewParam("include_tables", "Exclusion", false),
},
},
}
diagSet, err := systemDbCollector.Collect(conf)
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 1)
// disks frame
require.Equal(t, "disks", diagSet.Frames["disks"].Name())
require.Equal(t, []string{"name", "path", "free_space", "total_space"}, diagSet.Frames["disks"].Columns())
eDiskFrame := test.NewFakeDataFrame("disks", []string{"name", "path", "free_space", "total_space"},
[][]interface{}{
{"default", "/var/lib/clickhouse", 1729659346944, 1938213220352},
},
)
checkFrame(t, diagSet.Frames["disks"], eDiskFrame.Rows)
clickhouse.ExcludeColumns = excludeDefault
client.Reset()
})
t.Run("test with a low row limit", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.IntParam{
Value: 1,
Param: config.NewParam("row_limit", "Maximum number of rows to collect from any table. Negative values mean unlimited.", false),
},
},
}
client.QueryResponses["SELECT * FROM system.disks LIMIT 1"] = &diskFrame
client.QueryResponses["SELECT * FROM system.clusters LIMIT 1"] = &clusterFrame
client.QueryResponses["SELECT * FROM system.users LIMIT 1"] = &userFrame
diagSet, err := systemDbCollector.Collect(conf)
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 3)
// clusters frame
require.Equal(t, "clusters", diagSet.Frames["clusters"].Name())
require.Equal(t, clusterFrame.ColumnNames, diagSet.Frames["clusters"].Columns())
lClusterFrame := test.NewFakeDataFrame("clusters", []string{"cluster", "shard_num", "shard_weight", "replica_num", "host_name", "host_address", "port", "is_local", "user", "default_database", "errors_count", "slowdowns_count", "estimated_recovery_time"},
[][]interface{}{
{"events", 1, 1, 1, "dalem-local-clickhouse-blue-1", "192.168.144.2", 9000, 1, "default", "", 0, 0, 0},
})
checkFrame(t, diagSet.Frames["clusters"], lClusterFrame.Rows)
client.Reset()
})
t.Run("test with a negative low row limit", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.IntParam{
Value: -23,
Param: config.NewParam("row_limit", "Maximum number of rows to collect from any table. Negative values mean unlimited.", false),
},
},
}
client.QueryResponses["SELECT * FROM system.clusters"] = &clusterFrame
client.QueryResponses["SELECT * FROM system.disks"] = &diskFrame
client.QueryResponses["SELECT * FROM system.users"] = &userFrame
diagSet, err := systemDbCollector.Collect(conf)
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 3)
// disks frame
require.Equal(t, "disks", diagSet.Frames["disks"].Name())
require.Equal(t, diskFrame.ColumnNames, diagSet.Frames["disks"].Columns())
checkFrame(t, diagSet.Frames["disks"], diskFrame.Rows)
// clusters frame
require.Equal(t, "clusters", diagSet.Frames["clusters"].Name())
require.Equal(t, clusterFrame.ColumnNames, diagSet.Frames["clusters"].Columns())
checkFrame(t, diagSet.Frames["clusters"], clusterFrame.Rows)
// users frame
require.Equal(t, "users", diagSet.Frames["users"].Name())
require.Equal(t, userFrame.ColumnNames, diagSet.Frames["users"].Columns())
checkFrame(t, diagSet.Frames["users"], userFrame.Rows)
client.Reset()
})
t.Run("test that includes overrides excludes", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
// nil means exclude nothing
Values: []string{"disks"},
Param: config.NewParam("exclude_tables", "Excluded", false),
},
config.StringListParam{
// nil means include everything
Values: []string{"disks", "clusters", "users"},
Param: config.NewParam("include_tables", "Included", false),
},
},
}
diagSet, err := systemDbCollector.Collect(conf)
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 3)
client.Reset()
})
t.Run("test banned", func(t *testing.T) {
bannedDefault := clickhouse.BannedTables
clickhouse.BannedTables = []string{"disks"}
diagSet, err := systemDbCollector.Collect(config.Configuration{})
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 2)
require.Contains(t, diagSet.Frames, "users")
require.Contains(t, diagSet.Frames, "clusters")
clickhouse.BannedTables = bannedDefault
client.Reset()
})
t.Run("test banned unless included", func(t *testing.T) {
bannedDefault := clickhouse.BannedTables
clickhouse.BannedTables = []string{"disks"}
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
// nil means include everything
Values: []string{"disks", "clusters", "users"},
Param: config.NewParam("include_tables", "Included", false),
},
},
}
diagSet, err := systemDbCollector.Collect(conf)
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 3)
require.Contains(t, diagSet.Frames, "disks")
require.Contains(t, diagSet.Frames, "users")
require.Contains(t, diagSet.Frames, "clusters")
clickhouse.BannedTables = bannedDefault
client.Reset()
})
t.Run("tables are ordered if configured", func(t *testing.T) {
defaultOrderBy := clickhouse.OrderBy
clickhouse.OrderBy = map[string]data.OrderBy{
"clusters": {
Column: "shard_num",
Order: data.Desc,
},
}
client.QueryResponses["SELECT * FROM system.clusters ORDER BY shard_num DESC LIMIT 100000"] = &clusterFrame
diagSet, err := systemDbCollector.Collect(config.Configuration{})
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 3)
clickhouse.OrderBy = defaultOrderBy
oClusterFrame := test.NewFakeDataFrame("clusters", []string{"cluster", "shard_num", "shard_weight", "replica_num", "host_name", "host_address", "port", "is_local", "user", "default_database", "errors_count", "slowdowns_count", "estimated_recovery_time"},
[][]interface{}{
{"events", 3, 1, 1, "dalem-local-clickhouse-blue-3", "192.168.144.3", 9000, 1, "default", "", 0, 0, 0},
{"events", 2, 1, 1, "dalem-local-clickhouse-blue-2", "192.168.144.4", 9000, 1, "default", "", 0, 0, 0},
{"events", 1, 1, 1, "dalem-local-clickhouse-blue-1", "192.168.144.2", 9000, 1, "default", "", 0, 0, 0},
},
)
checkFrame(t, diagSet.Frames["clusters"], oClusterFrame.Rows)
client.Reset()
})
}
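// checkFrame drains the frame and asserts that each row matches the expected rows.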
func checkFrame(t *testing.T, frame data.Frame, rows [][]interface{}) {
i := 0
for {
values, ok, err := frame.Next()
require.Nil(t, err)
if !ok {
break
}
require.ElementsMatch(t, rows[i], values)
i++
}
require.Equal(t, i, len(rows))
}

View File

@ -1,153 +0,0 @@
package clickhouse
import (
"fmt"
"strings"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/bmatcuk/doublestar/v4"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
// This collector collects the system zookeeper db
type ZookeeperCollector struct {
resourceManager *platform.ResourceManager
}
func NewZookeeperCollector(m *platform.ResourceManager) *ZookeeperCollector {
return &ZookeeperCollector{
resourceManager: m,
}
}
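// Collect walks system.zookeeper from the base of the configured glob pattern,
// returning a single hierarchical frame keyed as "zookeeper_db".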
func (zkc *ZookeeperCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
conf, err := conf.ValidateConfig(zkc.Configuration())
if err != nil {
return &data.DiagnosticBundle{}, err
}
pathPattern, err := config.ReadStringValue(conf, "path_pattern")
if err != nil {
return &data.DiagnosticBundle{}, err
}
defaultPattern, _ := zkc.Configuration().GetConfigParam("path_pattern")
if defaultPattern.(config.StringParam).Value != pathPattern {
log.Warn().Msgf("Using non default zookeeper glob pattern [%s] - this can potentially cause high query load", pathPattern)
}
maxDepth, err := config.ReadIntValue(conf, "max_depth")
if err != nil {
return &data.DiagnosticBundle{}, err
}
rowLimit, err := config.ReadIntValue(conf, "row_limit")
if err != nil {
return &data.DiagnosticBundle{}, err
}
// we use doublestar for globs as it provides us with ** but also allows us to identify prefix or base paths
if !doublestar.ValidatePattern(pathPattern) {
return &data.DiagnosticBundle{}, errors.Wrapf(err, "%s is not a valid pattern", pathPattern)
}
base, _ := doublestar.SplitPattern(pathPattern)
frames := make(map[string]data.Frame)
hFrame, frameErrors := zkc.collectSubFrames(base, pathPattern, rowLimit, 0, maxDepth)
fErrors := data.FrameErrors{
Errors: frameErrors,
}
frames["zookeeper_db"] = hFrame
return &data.DiagnosticBundle{
Frames: frames,
Errors: fErrors,
}, nil
}
// recursively iterates over the zookeeper sub tables to a max depth, applying the filter and max rows per table
func (zkc *ZookeeperCollector) collectSubFrames(path, pathPattern string, rowLimit, currentDepth, maxDepth int64) (data.HierarchicalFrame, []error) {
var frameErrors []error
var subFrames []data.HierarchicalFrame
currentDepth += 1
if currentDepth == maxDepth {
return data.HierarchicalFrame{}, frameErrors
}
match, err := doublestar.PathMatch(pathPattern, path)
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "Path match failed for pattern %s with path %s", pathPattern, path))
return data.HierarchicalFrame{}, frameErrors
}
// always examine the first level, or the walk never gets going
if !match && currentDepth > 1 {
return data.HierarchicalFrame{}, frameErrors
}
frame, err := zkc.resourceManager.DbClient.ExecuteStatement(path, fmt.Sprintf("SELECT name FROM system.zookeeper WHERE path='%s' LIMIT %d", path, rowLimit))
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to read zookeeper table path for sub paths %s", path))
return data.HierarchicalFrame{}, frameErrors
}
// this isn't ideal - we re-execute the query and add the result to our collection, as it will be consumed lazily by the output
outputFrame, err := zkc.resourceManager.DbClient.ExecuteStatement(path, fmt.Sprintf("SELECT * FROM system.zookeeper WHERE path='%s' LIMIT %d", path, rowLimit))
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to read zookeeper table path %s", path))
return data.HierarchicalFrame{}, frameErrors
}
frameComponents := strings.Split(path, "/")
frameId := frameComponents[len(frameComponents)-1]
for {
values, ok, err := frame.Next()
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "unable to read frame %s", frame.Name()))
return data.NewHierarchicalFrame(frameId, outputFrame, subFrames), frameErrors
}
if !ok {
return data.NewHierarchicalFrame(frameId, outputFrame, subFrames), frameErrors
}
subName := fmt.Sprintf("%v", values[0])
subPath := fmt.Sprintf("%s/%s", path, subName)
subFrame, errs := zkc.collectSubFrames(subPath, pathPattern, rowLimit, currentDepth, maxDepth)
if subFrame.Name() != "" {
subFrames = append(subFrames, subFrame)
}
frameErrors = append(frameErrors, errs...)
}
}
func (zkc *ZookeeperCollector) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "/clickhouse/{task_queue}/**",
Param: config.NewParam("path_pattern", "Glob pattern for zookeeper path matching. Change with caution.", false),
},
config.IntParam{
Value: 8,
Param: config.NewParam("max_depth", "Max depth for zookeeper navigation.", false),
},
config.IntParam{
Value: 10,
Param: config.NewParam("row_limit", "Maximum number of rows/sub nodes to collect/expand from any zookeeper leaf. Negative values mean unlimited.", false),
},
},
}
}
func (zkc *ZookeeperCollector) IsDefault() bool {
return false
}
func (zkc *ZookeeperCollector) Description() string {
return "Collects Zookeeper information available within ClickHouse."
}
// here we register the collector for use
func init() {
collectors.Register("zookeeper_db", func() (collectors.Collector, error) {
return &ZookeeperCollector{
resourceManager: platform.GetResourceManager(),
}, nil
})
}

View File

@ -1,102 +0,0 @@
package clickhouse_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/stretchr/testify/require"
)
func TestZookeeperConfiguration(t *testing.T) {
t.Run("correct configuration is returned for system zookeeper collector", func(t *testing.T) {
client := test.NewFakeClickhouseClient(make(map[string][]string))
zkCollector := clickhouse.NewZookeeperCollector(&platform.ResourceManager{
DbClient: client,
})
conf := zkCollector.Configuration()
require.Len(t, conf.Params, 3)
// check first param
require.IsType(t, config.StringParam{}, conf.Params[0])
pathPattern, ok := conf.Params[0].(config.StringParam)
require.True(t, ok)
require.False(t, pathPattern.Required())
require.Equal(t, pathPattern.Name(), "path_pattern")
require.Equal(t, "/clickhouse/{task_queue}/**", pathPattern.Value)
// check second param
require.IsType(t, config.IntParam{}, conf.Params[1])
maxDepth, ok := conf.Params[1].(config.IntParam)
require.True(t, ok)
require.False(t, maxDepth.Required())
require.Equal(t, "max_depth", maxDepth.Name())
require.Equal(t, int64(8), maxDepth.Value)
// check third param
require.IsType(t, config.IntParam{}, conf.Params[2])
rowLimit, ok := conf.Params[2].(config.IntParam)
require.True(t, ok)
require.False(t, rowLimit.Required())
require.Equal(t, "row_limit", rowLimit.Name())
require.Equal(t, int64(10), rowLimit.Value)
})
}
func TestZookeeperCollect(t *testing.T) {
level1 := test.NewFakeDataFrame("level_1", []string{"name", "value", "czxid", "mzxid", "ctime", "mtime", "version", "cversion", "aversion", "ephemeralOwner", "dataLength", "numChildren", "pzxid", "path"},
[][]interface{}{
{"name", "value", "czxid", "mzxid", "ctime", "mtime", "version", "cversion", "aversion", "ephemeralOwner", "dataLength", "numChildren", "pzxid", "path"},
{"task_queue", "", "4", "4", "2022-02-22 13:30:15", "2022-02-22 13:30:15", "0", "1", "0", "0", "0", "1", "5", "/clickhouse"},
{"copytasks", "", "525608", "525608", "2022-03-09 13:47:39", "2022-03-09 13:47:39", "0", "7", "0", "0", "0", "7", "526100", "/clickhouse"},
},
)
level2 := test.NewFakeDataFrame("level_2", []string{"name", "value", "czxid", "mzxid", "ctime", "mtime", "version", "cversion", "aversion", "ephemeralOwner", "dataLength", "numChildren", "pzxid", "path"},
[][]interface{}{
{"ddl", "", "5", "5", "2022-02-22 13:30:15", "2022-02-22 13:30:15", "0", "0", "0", "0", "0", "0", "5", "/clickhouse/task_queue"},
},
)
level3 := test.NewFakeDataFrame("level_2", []string{"name", "value", "czxid", "mzxid", "ctime", "mtime", "version", "cversion", "aversion", "ephemeralOwner", "dataLength", "numChildren", "pzxid", "path"},
[][]interface{}{},
)
dbTables := map[string][]string{
clickhouse.SystemDatabase: {"zookeeper"},
}
client := test.NewFakeClickhouseClient(dbTables)
client.QueryResponses["SELECT name FROM system.zookeeper WHERE path='/clickhouse' LIMIT 10"] = &level1
// can't reuse the frame as the first frame will be iterated as part of the recursive zookeeper search performed by the collector
cLevel1 := test.NewFakeDataFrame("level_1", level1.Columns(), level1.Rows)
client.QueryResponses["SELECT * FROM system.zookeeper WHERE path='/clickhouse' LIMIT 10"] = &cLevel1
client.QueryResponses["SELECT name FROM system.zookeeper WHERE path='/clickhouse/task_queue' LIMIT 10"] = &level2
cLevel2 := test.NewFakeDataFrame("level_2", level2.Columns(), level2.Rows)
client.QueryResponses["SELECT * FROM system.zookeeper WHERE path='/clickhouse/task_queue' LIMIT 10"] = &cLevel2
client.QueryResponses["SELECT name FROM system.zookeeper WHERE path='/clickhouse/task_queue/ddl' LIMIT 10"] = &level3
cLevel3 := test.NewFakeDataFrame("level_3", level3.Columns(), level3.Rows)
client.QueryResponses["SELECT * FROM system.zookeeper WHERE path='/clickhouse/task_queue/ddl' LIMIT 10"] = &cLevel3
zKCollector := clickhouse.NewZookeeperCollector(&platform.ResourceManager{
DbClient: client,
})
t.Run("test default zookeeper collection", func(t *testing.T) {
diagSet, err := zKCollector.Collect(config.Configuration{})
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 1)
require.Contains(t, diagSet.Frames, "zookeeper_db")
require.Equal(t, "clickhouse", diagSet.Frames["zookeeper_db"].Name())
require.IsType(t, data.HierarchicalFrame{}, diagSet.Frames["zookeeper_db"])
checkFrame(t, diagSet.Frames["zookeeper_db"], level1.Rows)
require.Equal(t, level1.Columns(), diagSet.Frames["zookeeper_db"].Columns())
hierarchicalFrame := diagSet.Frames["zookeeper_db"].(data.HierarchicalFrame)
require.Len(t, hierarchicalFrame.SubFrames, 1)
checkFrame(t, hierarchicalFrame.SubFrames[0], cLevel2.Rows)
require.Equal(t, cLevel2.Columns(), hierarchicalFrame.SubFrames[0].Columns())
hierarchicalFrame = hierarchicalFrame.SubFrames[0]
require.Len(t, hierarchicalFrame.SubFrames, 1)
checkFrame(t, hierarchicalFrame.SubFrames[0], cLevel3.Rows)
require.Equal(t, cLevel3.Columns(), hierarchicalFrame.SubFrames[0].Columns())
})
}

View File

@ -1,75 +0,0 @@
package collectors
import (
"fmt"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
type Collector interface {
Collect(config config.Configuration) (*data.DiagnosticBundle, error)
Configuration() config.Configuration
IsDefault() bool
Description() string
}
// Register can be called from init() on a collector in this package
// It will automatically be added to the Collectors map to be called externally
func Register(name string, collector CollectorFactory) {
if name == "diag_trace" {
// we use this to record errors and warnings
log.Fatal().Msgf("diag_trace is a reserved collector name")
}
// names must be unique
if _, ok := Collectors[name]; ok {
log.Fatal().Msgf("More than 1 collector is trying to register under the name %s. Names must be unique.", name)
}
Collectors[name] = collector
}
// CollectorFactory lets us use a closure to get instances of the collector struct
type CollectorFactory func() (Collector, error)
var Collectors = map[string]CollectorFactory{}
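// GetCollectorNames returns the names of all registered collectors, optionally
// restricted to those that run by default.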
func GetCollectorNames(defaultOnly bool) []string {
// can't pre-allocate as not all may be default
var collectors []string
for collectorName := range Collectors {
collector, err := GetCollectorByName(collectorName)
if err != nil {
log.Fatal().Err(err)
}
if !defaultOnly || collector.IsDefault() {
collectors = append(collectors, collectorName)
}
}
return collectors
}
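// GetCollectorByName instantiates a registered collector via its factory.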
func GetCollectorByName(name string) (Collector, error) {
if collectorFactory, ok := Collectors[name]; ok {
// instantiate the collector via its registered factory
collector, err := collectorFactory()
if err != nil {
return nil, errors.Wrapf(err, "collector %s could not be initialized", name)
}
return collector, nil
}
return nil, fmt.Errorf("%s is not a valid collector name", name)
}
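// BuildConfigurationOptions instantiates every registered collector and returns
// its configuration, keyed by collector name.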
func BuildConfigurationOptions() (map[string]config.Configuration, error) {
configurations := make(map[string]config.Configuration)
for name, collectorFactory := range Collectors {
collector, err := collectorFactory()
if err != nil {
return nil, errors.Wrapf(err, "collector %s could not be initialized", name)
}
configurations[name] = collector.Configuration()
}
return configurations, nil
}

View File

@ -1,57 +0,0 @@
package collectors_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/stretchr/testify/require"
)
func TestGetCollectorNames(t *testing.T) {
t.Run("can get all collector names", func(t *testing.T) {
collectorNames := collectors.GetCollectorNames(false)
require.ElementsMatch(t, []string{"system_db", "config", "summary", "system", "logs", "db_logs", "file", "command", "zookeeper_db"}, collectorNames)
})
t.Run("can get default collector names", func(t *testing.T) {
collectorNames := collectors.GetCollectorNames(true)
require.ElementsMatch(t, []string{"system_db", "config", "summary", "system", "logs", "db_logs"}, collectorNames)
})
}
func TestGetCollectorByName(t *testing.T) {
t.Run("can get collector by name", func(t *testing.T) {
collector, err := collectors.GetCollectorByName("system_db")
require.Nil(t, err)
require.Equal(t, clickhouse.NewSystemDatabaseCollector(platform.GetResourceManager()), collector)
})
t.Run("fails on non existing collector", func(t *testing.T) {
collector, err := collectors.GetCollectorByName("random")
require.NotNil(t, err)
require.Equal(t, "random is not a valid collector name", err.Error())
require.Nil(t, collector)
})
}
func TestBuildConfigurationOptions(t *testing.T) {
t.Run("can get all collector configurations", func(t *testing.T) {
configs, err := collectors.BuildConfigurationOptions()
require.Nil(t, err)
require.Len(t, configs, 9)
require.Contains(t, configs, "system_db")
require.Contains(t, configs, "config")
require.Contains(t, configs, "summary")
require.Contains(t, configs, "system")
require.Contains(t, configs, "logs")
require.Contains(t, configs, "db_logs")
require.Contains(t, configs, "file")
require.Contains(t, configs, "command")
require.Contains(t, configs, "zookeeper_db")
})
}

View File

@ -1,90 +0,0 @@
package system
import (
"bytes"
"os/exec"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/google/shlex"
"github.com/pkg/errors"
)
// This collector runs a user specified command and captures its output
type CommandCollector struct {
resourceManager *platform.ResourceManager
}
func NewCommandCollector(m *platform.ResourceManager) *CommandCollector {
return &CommandCollector{
resourceManager: m,
}
}
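// Collect executes the configured command, capturing stdout, stderr and any
// execution error in a single-row "output" frame.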
func (c *CommandCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
conf, err := conf.ValidateConfig(c.Configuration())
if err != nil {
return &data.DiagnosticBundle{}, err
}
command, err := config.ReadStringValue(conf, "command")
if err != nil {
return &data.DiagnosticBundle{}, err
}
var frameErrors []error
// shlex to split the commands and args
cmdArgs, err := shlex.Split(command)
if err != nil || len(cmdArgs) == 0 {
return &data.DiagnosticBundle{}, errors.Wrap(err, "Unable to parse command")
}
cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err = cmd.Run()
var sError string
if err != nil {
frameErrors = append(frameErrors, errors.Wrap(err, "Unable to execute command"))
sError = err.Error()
}
memoryFrame := data.NewMemoryFrame("output", []string{"command", "stdout", "stderr", "error"}, [][]interface{}{
{command, stdout.String(), stderr.String(), sError},
})
return &data.DiagnosticBundle{
Errors: data.FrameErrors{Errors: frameErrors},
Frames: map[string]data.Frame{
"output": memoryFrame,
},
}, nil
}
func (c *CommandCollector) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "",
Param: config.NewParam("command", "Command to execute", true),
AllowEmpty: false,
},
},
}
}
func (c *CommandCollector) IsDefault() bool {
return false
}
func (c *CommandCollector) Description() string {
return "Allows collection of the output from a user specified command"
}
// here we register the collector for use
func init() {
collectors.Register("command", func() (collectors.Collector, error) {
return &CommandCollector{
resourceManager: platform.GetResourceManager(),
}, nil
})
}

View File

@ -1,107 +0,0 @@
package system_test
import (
"fmt"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/stretchr/testify/require"
)
func TestCommandConfiguration(t *testing.T) {
t.Run("correct configuration is returned for file collector", func(t *testing.T) {
commandCollector := system.NewCommandCollector(&platform.ResourceManager{})
conf := commandCollector.Configuration()
require.Len(t, conf.Params, 1)
require.IsType(t, config.StringParam{}, conf.Params[0])
command, ok := conf.Params[0].(config.StringParam)
require.True(t, ok)
require.True(t, command.Required())
require.Equal(t, command.Name(), "command")
require.Equal(t, "", command.Value)
})
}
func TestCommandCollect(t *testing.T) {
t.Run("test simple command with args", func(t *testing.T) {
commandCollector := system.NewCommandCollector(&platform.ResourceManager{})
bundle, err := commandCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "ls -l ../../../testdata",
Param: config.NewParam("command", "Command to execute", true),
AllowEmpty: false,
},
},
})
require.Nil(t, err)
require.Nil(t, bundle.Errors.Errors)
require.Len(t, bundle.Frames, 1)
require.Contains(t, bundle.Frames, "output")
require.Equal(t, bundle.Frames["output"].Columns(), []string{"command", "stdout", "stderr", "error"})
memFrame := bundle.Frames["output"].(data.MemoryFrame)
values, ok, err := memFrame.Next()
require.True(t, ok)
require.Nil(t, err)
fmt.Println(values)
require.Len(t, values, 4)
require.Equal(t, "ls -l ../../../testdata", values[0])
require.Contains(t, values[1], "configs")
require.Contains(t, values[1], "docker")
require.Contains(t, values[1], "log")
require.Equal(t, "", values[2])
require.Equal(t, "", values[3])
values, ok, err = memFrame.Next()
require.False(t, ok)
require.Nil(t, err)
require.Nil(t, values)
})
t.Run("test empty command", func(t *testing.T) {
commandCollector := system.NewCommandCollector(&platform.ResourceManager{})
bundle, err := commandCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "",
Param: config.NewParam("command", "Command to execute", true),
AllowEmpty: false,
},
},
})
require.Equal(t, "parameter command is invalid - command cannot be empty", err.Error())
require.Equal(t, &data.DiagnosticBundle{}, bundle)
})
t.Run("test invalid command", func(t *testing.T) {
commandCollector := system.NewCommandCollector(&platform.ResourceManager{})
bundle, err := commandCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "ls --invalid ../../../testdata",
Param: config.NewParam("command", "Command to execute", true),
AllowEmpty: false,
},
},
})
// commands may error with output - we still capture on stderr
require.Nil(t, err)
require.Len(t, bundle.Errors.Errors, 1)
require.Equal(t, "Unable to execute command: exit status 2", bundle.Errors.Errors[0].Error())
require.Len(t, bundle.Frames, 1)
require.Contains(t, bundle.Frames, "output")
require.Equal(t, bundle.Frames["output"].Columns(), []string{"command", "stdout", "stderr", "error"})
memFrame := bundle.Frames["output"].(data.MemoryFrame)
values, ok, err := memFrame.Next()
require.True(t, ok)
require.Nil(t, err)
require.Len(t, values, 4)
require.Equal(t, "ls --invalid ../../../testdata", values[0])
require.Equal(t, "", values[1])
// exact values here may vary on platform
require.NotEmpty(t, values[2])
require.NotEmpty(t, values[3])
})
}

View File

@ -1,100 +0,0 @@
package system
import (
"os"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
"github.com/yargevad/filepathx"
)
// This collector collects arbitrary user files
type FileCollector struct {
resourceManager *platform.ResourceManager
}
func NewFileCollector(m *platform.ResourceManager) *FileCollector {
return &FileCollector{
resourceManager: m,
}
}
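// Collect expands the configured glob pattern, skips directories and returns
// the matching files as a single "collection" frame.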
func (f *FileCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
conf, err := conf.ValidateConfig(f.Configuration())
if err != nil {
return &data.DiagnosticBundle{}, err
}
filePattern, err := config.ReadStringValue(conf, "file_pattern")
if err != nil {
return &data.DiagnosticBundle{}, err
}
var frameErrors []error
// this util package supports recursive file matching e.g. /**/*
matches, err := filepathx.Glob(filePattern)
if err != nil {
return &data.DiagnosticBundle{}, errors.Wrapf(err, "Invalid file_pattern \"%s\"", filePattern)
}
if len(matches) == 0 {
frameErrors = append(frameErrors, errors.New("0 files match glob pattern"))
return &data.DiagnosticBundle{
Errors: data.FrameErrors{Errors: frameErrors},
}, nil
}
var filePaths []string
for _, match := range matches {
fi, err := os.Stat(match)
if err != nil {
frameErrors = append(frameErrors, errors.Wrapf(err, "Unable to read file %s", match))
// fi is nil on error, so we must skip before calling fi.IsDir()
continue
}
if !fi.IsDir() {
log.Debug().Msgf("Collecting file %s", match)
filePaths = append(filePaths, match)
}
}
frame := data.NewFileFrame("collection", filePaths)
return &data.DiagnosticBundle{
Errors: data.FrameErrors{Errors: frameErrors},
Frames: map[string]data.Frame{
"collection": frame,
},
}, nil
}
func (f *FileCollector) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "",
Param: config.NewParam("file_pattern", "Glob based pattern to specify files for collection", true),
AllowEmpty: false,
},
},
}
}
func (f *FileCollector) IsDefault() bool {
return false
}
func (f *FileCollector) Description() string {
return "Allows collection of user specified files"
}
// here we register the collector for use
func init() {
collectors.Register("file", func() (collectors.Collector, error) {
return &FileCollector{
resourceManager: platform.GetResourceManager(),
}, nil
})
}

View File

@ -1,110 +0,0 @@
package system_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/stretchr/testify/require"
)
func TestFileConfiguration(t *testing.T) {
t.Run("correct configuration is returned for file collector", func(t *testing.T) {
fileCollector := system.NewFileCollector(&platform.ResourceManager{})
conf := fileCollector.Configuration()
require.Len(t, conf.Params, 1)
require.IsType(t, config.StringParam{}, conf.Params[0])
filePattern, ok := conf.Params[0].(config.StringParam)
require.True(t, ok)
require.True(t, filePattern.Required())
require.Equal(t, filePattern.Name(), "file_pattern")
require.Equal(t, "", filePattern.Value)
})
}
func TestFileCollect(t *testing.T) {
t.Run("test filter patterns work", func(t *testing.T) {
fileCollector := system.NewFileCollector(&platform.ResourceManager{})
bundle, err := fileCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "../../../testdata/**/*.xml",
Param: config.NewParam("file_pattern", "Glob based pattern to specify files for collection", true),
AllowEmpty: false,
},
},
})
require.Nil(t, err)
require.Nil(t, bundle.Errors.Errors)
checkFileBundle(t, bundle,
[]string{"../../../testdata/configs/include/xml/server-include.xml",
"../../../testdata/configs/include/xml/user-include.xml",
"../../../testdata/configs/xml/config.xml",
"../../../testdata/configs/xml/users.xml",
"../../../testdata/configs/xml/users.d/default-password.xml",
"../../../testdata/configs/yandex_xml/config.xml",
"../../../testdata/docker/admin.xml",
"../../../testdata/docker/custom.xml"})
})
t.Run("invalid file patterns are detected", func(t *testing.T) {
fileCollector := system.NewFileCollector(&platform.ResourceManager{})
bundle, err := fileCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "",
Param: config.NewParam("file_pattern", "Glob based pattern to specify files for collection", true),
AllowEmpty: false,
},
},
})
require.NotNil(t, err)
require.Equal(t, "parameter file_pattern is invalid - file_pattern cannot be empty", err.Error())
require.Equal(t, &data.DiagnosticBundle{}, bundle)
})
t.Run("check empty matches are reported", func(t *testing.T) {
fileCollector := system.NewFileCollector(&platform.ResourceManager{})
bundle, err := fileCollector.Collect(config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "../../../testdata/**/*.random",
Param: config.NewParam("file_pattern", "Glob based pattern to specify files for collection", true),
AllowEmpty: false,
},
},
})
require.Nil(t, err)
require.Nil(t, bundle.Frames)
require.Len(t, bundle.Errors.Errors, 1)
require.Equal(t, "0 files match glob pattern", bundle.Errors.Errors[0].Error())
})
}
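// checkFileBundle verifies the bundle contains a single "collection" frame listing exactly the expected files.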
func checkFileBundle(t *testing.T, bundle *data.DiagnosticBundle, expectedFiles []string) {
require.NotNil(t, bundle)
require.Nil(t, bundle.Errors.Errors)
require.Len(t, bundle.Frames, 1)
require.Contains(t, bundle.Frames, "collection")
dirFrame, ok := bundle.Frames["collection"].(data.FileFrame)
require.True(t, ok)
require.Equal(t, []string{"files"}, dirFrame.Columns())
i := 0
for {
values, ok, err := dirFrame.Next()
require.Nil(t, err)
if !ok {
break
}
require.Len(t, values, 1)
file, ok := values[0].(data.SimpleFile)
require.True(t, ok)
require.Contains(t, expectedFiles, file.FilePath())
i++
}
require.Equal(t, len(expectedFiles), i)
}

View File

@ -1,235 +0,0 @@
package system
import (
"strings"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/elastic/gosigar"
"github.com/jaypipes/ghw"
"github.com/matishsiao/goInfo"
"github.com/pkg/errors"
)
// This collector collects the system overview
type SystemCollector struct {
resourceManager *platform.ResourceManager
}
func NewSystemCollector(m *platform.ResourceManager) *SystemCollector {
return &SystemCollector{
resourceManager: m,
}
}
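// Collect gathers each hardware/OS frame independently - a failure in one stat
// function is recorded as a frame error rather than aborting the collection.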
func (sc *SystemCollector) Collect(conf config.Configuration) (*data.DiagnosticBundle, error) {
_, err := conf.ValidateConfig(sc.Configuration())
if err != nil {
return &data.DiagnosticBundle{}, err
}
frames := make(map[string]data.Frame)
var frameErrors []error
frameErrors = addStatsToFrame(frames, frameErrors, "disks", getDisk)
frameErrors = addStatsToFrame(frames, frameErrors, "disk_usage", getDiskUsage)
frameErrors = addStatsToFrame(frames, frameErrors, "memory", getMemory)
frameErrors = addStatsToFrame(frames, frameErrors, "memory_usage", getMemoryUsage)
frameErrors = addStatsToFrame(frames, frameErrors, "cpu", getCPU)
//frameErrors = addStatsToFrame(frames, frameErrors, "cpu_usage", getCPUUsage)
frameErrors = addStatsToFrame(frames, frameErrors, "processes", getProcessList)
frameErrors = addStatsToFrame(frames, frameErrors, "os", getHostDetails)
return &data.DiagnosticBundle{
Frames: frames,
Errors: data.FrameErrors{
Errors: frameErrors,
},
}, err
}
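// addStatsToFrame invokes statFunc, stores the resulting frame under name and
// accumulates any error.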
func addStatsToFrame(frames map[string]data.Frame, errors []error, name string, statFunc func() (data.MemoryFrame, error)) []error {
frame, err := statFunc()
if err != nil {
errors = append(errors, err)
}
frames[name] = frame
return errors
}
func (sc *SystemCollector) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{},
}
}
func (sc *SystemCollector) IsDefault() bool {
return true
}
func getDisk() (data.MemoryFrame, error) {
block, err := ghw.Block()
if err != nil {
return data.MemoryFrame{}, errors.Wrapf(err, "unable to list block storage")
}
var rows [][]interface{}
columns := []string{"name", "size", "physicalBlockSize", "driveType", "controller", "vendor", "model", "partitionName", "partitionSize", "mountPoint", "readOnly"}
for _, disk := range block.Disks {
for _, part := range disk.Partitions {
rows = append(rows, []interface{}{disk.Name, disk.SizeBytes, disk.PhysicalBlockSizeBytes, disk.DriveType, disk.StorageController, disk.Vendor, disk.Model, part.Name, part.SizeBytes, part.MountPoint, part.IsReadOnly})
}
}
return data.NewMemoryFrame("disk_usage", columns, rows), nil
}
func getDiskUsage() (data.MemoryFrame, error) {
fsList := gosigar.FileSystemList{}
err := fsList.Get()
if err != nil {
return data.MemoryFrame{}, errors.Wrapf(err, "unable to list filesystems for usage")
}
rows := make([][]interface{}, len(fsList.List))
columns := []string{"filesystem", "size", "used", "avail", "use%", "mounted on"}
for i, fs := range fsList.List {
dirName := fs.DirName
usage := gosigar.FileSystemUsage{}
err = usage.Get(dirName)
if err == nil {
rows[i] = []interface{}{fs.DevName, usage.Total, usage.Used, usage.Avail, usage.UsePercent(), dirName}
} else {
// fall back to zeros so we still output a row for the filesystem
rows[i] = []interface{}{fs.DevName, 0, 0, 0, 0, dirName}
}
}
return data.NewMemoryFrame("disk_usage", columns, rows), nil
}
func getMemory() (data.MemoryFrame, error) {
memory, err := ghw.Memory()
if err != nil {
return data.MemoryFrame{}, errors.Wrapf(err, "unable to read memory")
}
columns := []string{"totalPhysical", "totalUsable", "supportedPageSizes"}
rows := make([][]interface{}, 1)
rows[0] = []interface{}{memory.TotalPhysicalBytes, memory.TotalUsableBytes, memory.SupportedPageSizes}
return data.NewMemoryFrame("memory", columns, rows), nil
}
func getMemoryUsage() (data.MemoryFrame, error) {
mem := gosigar.Mem{}
swap := gosigar.Swap{}
err := mem.Get()
if err != nil {
return data.MemoryFrame{}, errors.Wrapf(err, "unable to read memory usage")
}
err = swap.Get()
if err != nil {
return data.MemoryFrame{}, errors.Wrapf(err, "unable to read swap")
}
columns := []string{"type", "total", "used", "free"}
rows := make([][]interface{}, 3)
rows[0] = []interface{}{"mem", mem.Total, mem.Used, mem.Free}
rows[1] = []interface{}{"buffers/cache", 0, mem.ActualUsed, mem.ActualFree}
rows[2] = []interface{}{"swap", swap.Total, swap.Used, swap.Free}
return data.NewMemoryFrame("memory_usage", columns, rows), nil
}
func getCPU() (data.MemoryFrame, error) {
cpu, err := ghw.CPU()
if err != nil {
return data.MemoryFrame{}, errors.Wrapf(err, "unable to list cpus")
}
columns := []string{"processor", "vendor", "model", "core", "numThreads", "logical", "capabilities"}
var rows [][]interface{}
for _, proc := range cpu.Processors {
for _, core := range proc.Cores {
rows = append(rows, []interface{}{proc.ID, proc.Vendor, proc.Model, core.ID, core.NumThreads, core.LogicalProcessors, strings.Join(proc.Capabilities, " ")})
}
}
return data.NewMemoryFrame("cpu", columns, rows), nil
}
// unlike getCPU(), which lists architecture details, this measures CPU usage. It needs successive samples (ticks), so it is not currently used
// see https://github.com/elastic/beats/blob/master/metricbeat/internal/metrics/cpu/metrics.go#L131 for inspiration
//nolint
func getCPUUsage() (data.MemoryFrame, error) {
cpuList := gosigar.CpuList{}
err := cpuList.Get()
if err != nil {
return data.MemoryFrame{}, errors.Wrapf(err, "unable to list cpus for usage")
}
columns := []string{"sys", "nice", "stolen", "irq", "idle", "softIrq", "user", "wait", "total"}
rows := make([][]interface{}, len(cpuList.List))
for i, cpu := range cpuList.List {
rows[i] = []interface{}{cpu.Sys, cpu.Nice, cpu.Stolen, cpu.Irq, cpu.Idle, cpu.SoftIrq, cpu.User, cpu.Wait, cpu.Total()}
}
return data.NewMemoryFrame("cpu_usage", columns, rows), nil
}
func getProcessList() (data.MemoryFrame, error) {
pidList := gosigar.ProcList{}
err := pidList.Get()
if err != nil {
return data.MemoryFrame{}, errors.Wrapf(err, "unable to list processes")
}
columns := []string{"pid", "ppid", "stime", "time", "rss", "size", "faults", "minorFaults", "majorFaults", "user", "state", "priority", "nice", "command"}
rows := make([][]interface{}, len(pidList.List))
for i, pid := range pidList.List {
state := gosigar.ProcState{}
mem := gosigar.ProcMem{}
time := gosigar.ProcTime{}
args := gosigar.ProcArgs{}
if err := state.Get(pid); err != nil {
continue
}
if err := mem.Get(pid); err != nil {
continue
}
if err := time.Get(pid); err != nil {
continue
}
if err := args.Get(pid); err != nil {
continue
}
rows[i] = []interface{}{pid, state.Ppid, time.FormatStartTime(), time.FormatTotal(), mem.Resident, mem.Size,
mem.PageFaults, mem.MinorFaults, mem.MajorFaults, state.Username, state.State, state.Priority, state.Nice,
strings.Join(args.List, " ")}
}
return data.NewMemoryFrame("process_list", columns, rows), nil
}
func getHostDetails() (data.MemoryFrame, error) {
gi, err := goInfo.GetInfo()
if err != nil {
return data.MemoryFrame{}, errors.Wrapf(err, "unable to get host summary")
}
columns := []string{"hostname", "os", "goOs", "cpus", "core", "kernel", "platform"}
rows := [][]interface{}{
{gi.Hostname, gi.OS, gi.GoOS, gi.CPUs, gi.Core, gi.Kernel, gi.Platform},
}
return data.NewMemoryFrame("os", columns, rows), nil
}
func (sc *SystemCollector) Description() string {
return "Collects summary OS and hardware statistics for the host"
}
// here we register the collector for use
func init() {
collectors.Register("system", func() (collectors.Collector, error) {
return &SystemCollector{
resourceManager: platform.GetResourceManager(),
}, nil
})
}

View File

@ -1,89 +0,0 @@
package system_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/stretchr/testify/require"
)
func TestSystemConfiguration(t *testing.T) {
t.Run("correct configuration is returned for system collector", func(t *testing.T) {
systemCollector := system.NewSystemCollector(&platform.ResourceManager{})
conf := systemCollector.Configuration()
require.Len(t, conf.Params, 0)
require.Equal(t, []config.ConfigParam{}, conf.Params)
})
}
func TestSystemCollect(t *testing.T) {
t.Run("test default system collection", func(t *testing.T) {
systemCollector := system.NewSystemCollector(&platform.ResourceManager{})
diagSet, err := systemCollector.Collect(config.Configuration{})
require.Nil(t, err)
require.NotNil(t, diagSet)
require.Len(t, diagSet.Errors.Errors, 0)
require.Len(t, diagSet.Frames, 7)
require.Contains(t, diagSet.Frames, "disks")
require.Contains(t, diagSet.Frames, "disk_usage")
require.Contains(t, diagSet.Frames, "memory")
require.Contains(t, diagSet.Frames, "memory_usage")
require.Contains(t, diagSet.Frames, "cpu")
require.Contains(t, diagSet.Frames, "processes")
require.Contains(t, diagSet.Frames, "os")
// responses here will vary depending on platform - mocking seems excessive so we test we have some data
// disks
require.Equal(t, []string{"name", "size", "physicalBlockSize", "driveType", "controller", "vendor", "model", "partitionName", "partitionSize", "mountPoint", "readOnly"}, diagSet.Frames["disks"].Columns())
diskFrames, err := countFrameRows(diagSet, "disks")
require.Greater(t, diskFrames, 0)
require.Nil(t, err)
// disk usage
require.Equal(t, []string{"filesystem", "size", "used", "avail", "use%", "mounted on"}, diagSet.Frames["disk_usage"].Columns())
diskUsageFrames, err := countFrameRows(diagSet, "disk_usage")
require.Greater(t, diskUsageFrames, 0)
require.Nil(t, err)
// memory
require.Equal(t, []string{"totalPhysical", "totalUsable", "supportedPageSizes"}, diagSet.Frames["memory"].Columns())
memoryFrames, err := countFrameRows(diagSet, "memory")
require.Greater(t, memoryFrames, 0)
require.Nil(t, err)
// memory_usage
require.Equal(t, []string{"type", "total", "used", "free"}, diagSet.Frames["memory_usage"].Columns())
memoryUsageFrames, err := countFrameRows(diagSet, "memory_usage")
require.Greater(t, memoryUsageFrames, 0)
require.Nil(t, err)
// cpu
require.Equal(t, []string{"processor", "vendor", "model", "core", "numThreads", "logical", "capabilities"}, diagSet.Frames["cpu"].Columns())
cpuFrames, err := countFrameRows(diagSet, "cpu")
require.Greater(t, cpuFrames, 0)
require.Nil(t, err)
// processes
require.Equal(t, []string{"pid", "ppid", "stime", "time", "rss", "size", "faults", "minorFaults", "majorFaults", "user", "state", "priority", "nice", "command"}, diagSet.Frames["processes"].Columns())
processesFrames, err := countFrameRows(diagSet, "processes")
require.Greater(t, processesFrames, 0)
require.Nil(t, err)
// os
require.Equal(t, []string{"hostname", "os", "goOs", "cpus", "core", "kernel", "platform"}, diagSet.Frames["os"].Columns())
osFrames, err := countFrameRows(diagSet, "os")
require.Greater(t, osFrames, 0)
require.Nil(t, err)
})
}
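// countFrameRows drains the frame, returning the number of rows read and any error.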
func countFrameRows(diagSet *data.DiagnosticBundle, frameName string) (int, error) {
frame := diagSet.Frames[frameName]
i := 0
for {
_, ok, err := frame.Next()
if !ok {
return i, err
}
if err != nil {
return i, err
}
i++
}
}

View File

@ -1,344 +0,0 @@
package file
import (
"context"
"encoding/csv"
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"strings"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/mholt/archiver/v4"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
const OutputName = "simple"
type SubFolderGenerator func() string
type SimpleOutput struct {
// mainly used for testing to make sub folder deterministic - which it won't be by default as it uses a timestamp
FolderGenerator SubFolderGenerator
}
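// Write materializes each diagnostic bundle under <directory>/<id>/<sub folder>
// and, unless skip_archive is set, compresses the result to a tar.gz archive
// and removes the uncompressed tree.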
func (o SimpleOutput) Write(id string, bundles map[string]*data.DiagnosticBundle, conf config.Configuration) (data.FrameErrors, error) {
conf, err := conf.ValidateConfig(o.Configuration())
if err != nil {
return data.FrameErrors{}, err
}
directory, err := config.ReadStringValue(conf, "directory")
if err != nil {
return data.FrameErrors{}, err
}
directory, err = getWorkingDirectory(directory)
if err != nil {
return data.FrameErrors{}, err
}
subFolder := strconv.FormatInt(utils.MakeTimestamp(), 10)
if o.FolderGenerator != nil {
subFolder = o.FolderGenerator()
}
skipArchive, err := config.ReadBoolValue(conf, "skip_archive")
if err != nil {
return data.FrameErrors{}, err
}
outputDir := filepath.Join(directory, id, subFolder)
log.Info().Msgf("creating bundle in %s", outputDir)
if err := os.MkdirAll(outputDir, os.ModePerm); err != nil {
return data.FrameErrors{}, err
}
frameErrors := data.FrameErrors{}
var filePaths []string
for name := range bundles {
bundlePaths, frameError := writeDiagnosticBundle(name, bundles[name], outputDir)
filePaths = append(filePaths, bundlePaths...)
frameErrors.Errors = append(frameErrors.Errors, frameError.Errors...)
}
log.Info().Msg("bundle created")
if !skipArchive {
archiveFilename := filepath.Join(directory, id, fmt.Sprintf("%s.tar.gz", subFolder))
log.Info().Msgf("compressing bundle to %s", archiveFilename)
// produce a map from input paths to archive paths - we preserve the output directory and hierarchy
archiveMap := createArchiveMap(filePaths, directory)
if err := createArchive(archiveFilename, archiveMap); err != nil {
return frameErrors, err
}
// we delete the original directory leaving just the archive behind
if err := os.RemoveAll(outputDir); err != nil {
return frameErrors, err
}
log.Info().Msgf("archive ready at: %s ", archiveFilename)
}
return frameErrors, nil
}
func writeDiagnosticBundle(name string, diag *data.DiagnosticBundle, baseDir string) ([]string, data.FrameErrors) {
diagDir := filepath.Join(baseDir, name)
if err := os.MkdirAll(diagDir, os.ModePerm); err != nil {
return nil, data.FrameErrors{Errors: []error{
errors.Wrapf(err, "unable to create directory for %s", name),
}}
}
frameErrors := data.FrameErrors{}
var filePaths []string
for frameId, frame := range diag.Frames {
fFilePath, errs := writeFrame(frameId, frame, diagDir)
filePaths = append(filePaths, fFilePath...)
if len(errs) > 0 {
// it would be nice to wrap this list of errors into something formal, but this logs well
frameErrors.Errors = append(frameErrors.Errors, fmt.Errorf("unable to write frame %s for %s", frameId, name))
frameErrors.Errors = append(frameErrors.Errors, errs...)
}
}
return filePaths, frameErrors
}
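// writeFrame dispatches on the concrete frame type - anything without a
// specialized writer falls back to CSV via writeDatabaseFrame.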
func writeFrame(frameId string, frame data.Frame, baseDir string) ([]string, []error) {
switch f := frame.(type) {
case data.DatabaseFrame:
return writeDatabaseFrame(frameId, f, baseDir)
case data.ConfigFileFrame:
return writeConfigFrame(frameId, f, baseDir)
case data.DirectoryFileFrame:
return processDirectoryFileFrame(frameId, f, baseDir)
case data.FileFrame:
return processFileFrame(frameId, f, baseDir)
case data.HierarchicalFrame:
return writeHierarchicalFrame(frameId, f, baseDir)
default:
// for now our data frame writer supports all frames
return writeDatabaseFrame(frameId, frame, baseDir)
}
}
func writeHierarchicalFrame(frameId string, frame data.HierarchicalFrame, baseDir string) ([]string, []error) {
filePaths, errs := writeFrame(frameId, frame.DataFrame, baseDir)
for _, subFrame := range frame.SubFrames {
subDir := filepath.Join(baseDir, subFrame.Name())
if err := os.MkdirAll(subDir, os.ModePerm); err != nil {
errs = append(errs, err)
continue
}
subPaths, subErrs := writeFrame(subFrame.Name(), subFrame, subDir)
filePaths = append(filePaths, subPaths...)
errs = append(errs, subErrs...)
}
return filePaths, errs
}
func writeDatabaseFrame(frameId string, frame data.Frame, baseDir string) ([]string, []error) {
frameFilePath := filepath.Join(baseDir, fmt.Sprintf("%s.csv", frameId))
var errs []error
f, err := os.Create(frameFilePath)
if err != nil {
errs = append(errs, errors.Wrapf(err, "unable to create directory for frame %s", frameId))
return []string{}, errs
}
defer f.Close()
w := csv.NewWriter(f)
defer w.Flush()
if err := w.Write(frame.Columns()); err != nil {
errs = append(errs, errors.Wrapf(err, "unable to write columns for frame %s", frameId))
return []string{}, errs
}
// unlike configs and logs, we don't collect an error for every row here - it could mean a lot of unnecessary noise
for {
values, ok, err := frame.Next()
if err != nil {
errs = append(errs, errors.Wrapf(err, "unable to read frame %s", frameId))
return []string{}, errs
}
if !ok {
return []string{frameFilePath}, errs
}
sValues := make([]string, len(values))
for i, value := range values {
sValues[i] = fmt.Sprintf("%v", value)
}
if err := w.Write(sValues); err != nil {
errs = append(errs, errors.Wrapf(err, "unable to write row for frame %s", frameId))
return []string{}, errs
}
}
}
func writeConfigFrame(frameId string, frame data.ConfigFileFrame, baseDir string) ([]string, []error) {
var errs []error
frameDirectory := filepath.Join(baseDir, frameId)
if err := os.MkdirAll(frameDirectory, os.ModePerm); err != nil {
errs = append(errs, errors.Wrapf(err, "unable to create directory for frame %s", frameId))
return []string{}, errs
}
// this holds our files included
includesDirectory := filepath.Join(frameDirectory, "includes")
if err := os.MkdirAll(includesDirectory, os.ModePerm); err != nil {
errs = append(errs, errors.Wrapf(err, "unable to create includes directory for frame %s", frameId))
return []string{}, errs
}
for {
values, ok, err := frame.Next()
if err != nil {
errs = append(errs, err)
return []string{frameDirectory}, errs
}
if !ok {
return []string{frameDirectory}, errs
}
configFile := values[0].(data.ConfigFile)
if !configFile.IsIncluded() {
relPath := strings.TrimPrefix(configFile.FilePath(), frame.Directory)
destPath := path.Join(frameDirectory, relPath)
if err = configFile.Copy(destPath, true); err != nil {
errs = append(errs, errors.Wrapf(err, "Unable to copy file %s", configFile.FilePath()))
}
} else {
// include files could be anywhere - potentially multiple with the same name. We therefore recreate the directory
// hierarchy under includes to avoid collisions
destPath := path.Join(includesDirectory, configFile.FilePath())
if err = configFile.Copy(destPath, true); err != nil {
errs = append(errs, errors.Wrapf(err, "Unable to copy file %s", configFile.FilePath()))
}
}
}
}
func processDirectoryFileFrame(frameId string, frame data.DirectoryFileFrame, baseDir string) ([]string, []error) {
var errs []error
// each set of files goes under its own directory to preserve grouping
frameDirectory := filepath.Join(baseDir, frameId)
if err := os.MkdirAll(frameDirectory, os.ModePerm); err != nil {
errs = append(errs, errors.Wrapf(err, "unable to create directory for frame %s", frameId))
return []string{}, errs
}
for {
values, ok, err := frame.Next()
if err != nil {
errs = append(errs, err)
return []string{frameDirectory}, errs
}
if !ok {
return []string{frameDirectory}, errs
}
file := values[0].(data.SimpleFile)
relPath := strings.TrimPrefix(file.FilePath(), frame.Directory)
destPath := path.Join(frameDirectory, relPath)
if err = file.Copy(destPath, true); err != nil {
errs = append(errs, errors.Wrapf(err, "unable to copy file %s for frame %s", file, frameId))
}
}
}
func processFileFrame(frameId string, frame data.FileFrame, baseDir string) ([]string, []error) {
var errs []error
frameDirectory := filepath.Join(baseDir, frameId)
if err := os.MkdirAll(frameDirectory, os.ModePerm); err != nil {
errs = append(errs, errors.Wrapf(err, "unable to create directory for frame %s", frameId))
return []string{}, errs
}
for {
values, ok, err := frame.Next()
if err != nil {
// return on error, as values may be nil and consistent with the other frame writers
errs = append(errs, err)
return []string{frameDirectory}, errs
}
if !ok {
return []string{frameDirectory}, errs
}
file := values[0].(data.SimpleFile)
// we need an absolute path to preserve the directory hierarchy
dir, err := filepath.Abs(filepath.Dir(file.FilePath()))
if err != nil {
errs = append(errs, errors.Wrapf(err, "unable to determine dir for %s", file.FilePath()))
}
outputDir := filepath.Join(frameDirectory, dir)
if _, err := os.Stat(outputDir); os.IsNotExist(err) {
if err := os.MkdirAll(outputDir, os.ModePerm); err != nil {
errs = append(errs, errors.Wrapf(err, "unable to create directory for %s", file.FilePath()))
continue
}
}
// copy unconditionally - previously files were skipped whenever the directory already existed
outputPath := filepath.Join(outputDir, filepath.Base(file.FilePath()))
if err = file.Copy(outputPath, false); err != nil {
errs = append(errs, errors.Wrapf(err, "unable to copy file %s", file.FilePath()))
}
}
}
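// getWorkingDirectory resolves relative paths against the current working directory.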
func getWorkingDirectory(path string) (string, error) {
if !filepath.IsAbs(path) {
workingPath, err := os.Getwd()
if err != nil {
return "", err
}
return filepath.Join(workingPath, path), nil
}
return path, nil
}
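// createArchiveMap maps each file path to its archive path by stripping the
// output directory prefix, preserving the hierarchy inside the archive.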
func createArchiveMap(filePaths []string, prefix string) map[string]string {
archiveMap := make(map[string]string)
for _, path := range filePaths {
archiveMap[path] = strings.TrimPrefix(path, prefix)
}
return archiveMap
}
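// createArchive writes the given files into a gzipped tar archive.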
func createArchive(outputFile string, filePaths map[string]string) error {
files, err := archiver.FilesFromDisk(nil, filePaths)
if err != nil {
return err
}
out, err := os.Create(outputFile)
if err != nil {
return err
}
defer out.Close()
format := archiver.CompressedArchive{
Compression: archiver.Gz{},
Archival: archiver.Tar{},
}
err = format.Archive(context.Background(), out, files)
return err
}
func (o SimpleOutput) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "./",
Param: config.NewParam("directory", "Directory in which to create dump. Defaults to the current directory.", false),
},
config.StringOptions{
Value: "csv",
// TODO: add tsv and others here later
Options: []string{"csv"},
Param: config.NewParam("format", "Format of exported files", false),
},
config.BoolParam{
Value: false,
Param: config.NewParam("skip_archive", "Don't compress output to an archive", false),
},
},
}
}
func (o SimpleOutput) Description() string {
return "Writes out the diagnostic bundle as files in a structured directory, optionally producing a compressed archive."
}
// here we register the output for use
func init() {
outputs.Register(OutputName, func() (outputs.Output, error) {
return SimpleOutput{}, nil
})
}

View File

@ -1,468 +0,0 @@
package file_test
import (
"bufio"
"encoding/xml"
"fmt"
"io"
"os"
"path"
"strings"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/file"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/stretchr/testify/require"
)
var clusterFrame = test.NewFakeDataFrame("clusters", []string{"cluster", "shard_num", "shard_weight", "replica_num", "host_name", "host_address", "port", "is_local", "user", "default_database", "errors_count", "slowdowns_count", "estimated_recovery_time"},
[][]interface{}{
{"events", 1, 1, 1, "dalem-local-clickhouse-blue-1", "192.168.144.2", 9000, 1, "default", "", 0, 0, 0},
{"events", 2, 1, 1, "dalem-local-clickhouse-blue-2", "192.168.144.4", 9001, 1, "default", "", 0, 0, 0},
{"events", 3, 1, 1, "dalem-local-clickhouse-blue-3", "192.168.144.3", 9002, 1, "default", "", 0, 0, 0},
},
)
var diskFrame = test.NewFakeDataFrame("disks", []string{"name", "path", "free_space", "total_space", "keep_free_space", "type"},
[][]interface{}{
{"default", "/var/lib/clickhouse", 1729659346944, 1938213220352, "", "local"},
},
)
var userFrame = test.NewFakeDataFrame("users", []string{"name", "id", "storage", "auth_type", "auth_params", "host_ip", "host_names", "host_names_regexp", "host_names_like"},
[][]interface{}{
{"default", "94309d50-4f52-5250-31bd-74fecac179db,users.xml,plaintext_password", "sha256_password", []string{"::0"}, []string{}, []string{}, []string{}},
},
)
func TestConfiguration(t *testing.T) {
t.Run("correct configuration is returned", func(t *testing.T) {
output := file.SimpleOutput{}
conf := output.Configuration()
require.Len(t, conf.Params, 3)
// check first directory param
require.IsType(t, config.StringParam{}, conf.Params[0])
directory, ok := conf.Params[0].(config.StringParam)
require.True(t, ok)
require.False(t, directory.Required())
require.Equal(t, "directory", directory.Name())
require.Equal(t, "./", directory.Value)
// check second format param
require.IsType(t, config.StringOptions{}, conf.Params[1])
format, ok := conf.Params[1].(config.StringOptions)
require.True(t, ok)
require.False(t, format.Required())
require.Equal(t, "format", format.Name())
require.Equal(t, "csv", format.Value)
require.Equal(t, []string{"csv"}, format.Options)
// check third param - skip_archive
require.IsType(t, config.BoolParam{}, conf.Params[2])
skipArchive, ok := conf.Params[2].(config.BoolParam)
require.True(t, ok)
require.False(t, skipArchive.Required())
require.False(t, skipArchive.Value)
})
}
func TestWrite(t *testing.T) {
bundles := map[string]*data.DiagnosticBundle{
"systemA": {
Frames: map[string]data.Frame{
"disk": diskFrame,
"cluster": clusterFrame,
},
},
"systemB": {
Frames: map[string]data.Frame{
"user": userFrame,
},
},
}
t.Run("test we can write simple diagnostic sets", func(t *testing.T) {
tempDir := t.TempDir()
configuration := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Param: config.NewParam("directory", "A directory", true),
Value: tempDir,
},
// turn archiving off, as otherwise the output folder is deleted by default
config.BoolParam{
Value: true,
Param: config.NewParam("skip_archive", "Skip archive", false),
},
},
}
output := file.SimpleOutput{FolderGenerator: staticFolderName}
frameErrors, err := output.Write("test", bundles, configuration)
require.Nil(t, err)
require.Equal(t, data.FrameErrors{}, frameErrors)
clusterFile := path.Join(tempDir, "test", "test", "systemA", "cluster.csv")
diskFile := path.Join(tempDir, "test", "test", "systemA", "disk.csv")
userFile := path.Join(tempDir, "test", "test", "systemB", "user.csv")
require.FileExists(t, clusterFile)
require.FileExists(t, diskFile)
require.FileExists(t, userFile)
diskLines, err := readFileLines(diskFile)
require.Nil(t, err)
require.Len(t, diskLines, 2)
usersLines, err := readFileLines(userFile)
require.Nil(t, err)
require.Len(t, usersLines, 2)
clusterLines, err := readFileLines(clusterFile)
require.Nil(t, err)
require.Len(t, clusterLines, 4)
require.Equal(t, strings.Join(clusterFrame.ColumnNames, ","), clusterLines[0])
require.Equal(t, "events,1,1,1,dalem-local-clickhouse-blue-1,192.168.144.2,9000,1,default,,0,0,0", clusterLines[1])
require.Equal(t, "events,2,1,1,dalem-local-clickhouse-blue-2,192.168.144.4,9001,1,default,,0,0,0", clusterLines[2])
require.Equal(t, "events,3,1,1,dalem-local-clickhouse-blue-3,192.168.144.3,9002,1,default,,0,0,0", clusterLines[3])
resetFrames()
})
t.Run("test invalid parameter", func(t *testing.T) {
tempDir := t.TempDir()
configuration := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Param: config.NewParam("directory", "A directory", true),
Value: tempDir,
},
config.StringOptions{
Value: "random",
Options: []string{"csv"},
// TODO: add tsv and others here later
Param: config.NewParam("format", "Format of exported files", false),
},
config.BoolParam{
Value: true,
Param: config.NewParam("skip_archive", "Skip compressed archive", false),
},
},
}
output := file.SimpleOutput{FolderGenerator: staticFolderName}
frameErrors, err := output.Write("test", bundles, configuration)
require.Equal(t, data.FrameErrors{}, frameErrors)
require.NotNil(t, err)
require.Equal(t, "parameter format is invalid - random is not a valid value for format - [csv]", err.Error())
resetFrames()
})
t.Run("test compression", func(t *testing.T) {
tempDir := t.TempDir()
configuration := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Param: config.NewParam("directory", "A directory", true),
Value: tempDir,
},
},
}
output := file.SimpleOutput{FolderGenerator: staticFolderName}
frameErrors, err := output.Write("test", bundles, configuration)
require.Nil(t, err)
require.Equal(t, data.FrameErrors{}, frameErrors)
archiveFileName := path.Join(tempDir, "test", "test.tar.gz")
fi, err := os.Stat(archiveFileName)
require.Nil(t, err)
require.FileExists(t, archiveFileName)
// compression will vary, so test a range
require.Greater(t, int64(600), fi.Size())
require.Less(t, int64(200), fi.Size())
outputFolder := path.Join(tempDir, "test", "test")
// check the folder doesn't exist and is cleaned up
require.NoFileExists(t, outputFolder)
resetFrames()
})
t.Run("test support for directory frames", func(t *testing.T) {
// create 5 temporary files
tempDir := t.TempDir()
files := createRandomFiles(tempDir, 5)
dirFrame, errs := data.NewFileDirectoryFrame(tempDir, []string{"*.log"})
require.Empty(t, errs)
fileBundles := map[string]*data.DiagnosticBundle{
"systemA": {
Frames: map[string]data.Frame{
"disk": diskFrame,
"cluster": clusterFrame,
},
},
"config": {
Frames: map[string]data.Frame{
"logs": dirFrame,
},
},
}
destDir := t.TempDir()
configuration := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Param: config.NewParam("directory", "A directory", true),
Value: destDir,
},
// turn compression off as the folder will be deleted by default
config.BoolParam{
Value: true,
Param: config.NewParam("skip_archive", "Skip archive", false),
},
},
}
output := file.SimpleOutput{FolderGenerator: staticFolderName}
frameErrors, err := output.Write("test", fileBundles, configuration)
require.Nil(t, err)
require.NotNil(t, frameErrors)
// test the usual frames still work
clusterFile := path.Join(destDir, "test", "test", "systemA", "cluster.csv")
diskFile := path.Join(destDir, "test", "test", "systemA", "disk.csv")
require.FileExists(t, clusterFile)
require.FileExists(t, diskFile)
diskLines, err := readFileLines(diskFile)
require.Nil(t, err)
require.Len(t, diskLines, 2)
clusterLines, err := readFileLines(clusterFile)
require.Nil(t, err)
require.Len(t, clusterLines, 4)
require.Equal(t, strings.Join(clusterFrame.ColumnNames, ","), clusterLines[0])
require.Equal(t, "events,1,1,1,dalem-local-clickhouse-blue-1,192.168.144.2,9000,1,default,,0,0,0", clusterLines[1])
require.Equal(t, "events,2,1,1,dalem-local-clickhouse-blue-2,192.168.144.4,9001,1,default,,0,0,0", clusterLines[2])
require.Equal(t, "events,3,1,1,dalem-local-clickhouse-blue-3,192.168.144.3,9002,1,default,,0,0,0", clusterLines[3])
//test our directory frame
for _, filepath := range files {
// check they were copied
subPath := strings.TrimPrefix(filepath, tempDir)
// path here will be <destDir>/<id>/<folder>/config/logs/<sub path>
newPath := path.Join(destDir, "test", "test", "config", "logs", subPath)
require.FileExists(t, newPath)
}
resetFrames()
})
t.Run("test support for config frames", func(t *testing.T) {
xmlConfig := data.XmlConfig{
XMLName: xml.Name{},
Clickhouse: data.XmlLoggerConfig{
XMLName: xml.Name{},
ErrorLog: "/var/log/clickhouse-server/clickhouse-server.err.log",
Log: "/var/log/clickhouse-server/clickhouse-server.log",
},
IncludeFrom: "",
}
tempDir := t.TempDir()
confDir := path.Join(tempDir, "conf")
// create an includes file
includesDir := path.Join(tempDir, "includes")
err := os.MkdirAll(includesDir, os.ModePerm)
require.Nil(t, err)
includesPath := path.Join(includesDir, "random.xml")
includesFile, err := os.Create(includesPath)
require.Nil(t, err)
xmlWriter := io.Writer(includesFile)
enc := xml.NewEncoder(xmlWriter)
enc.Indent(" ", " ")
err = enc.Encode(xmlConfig)
require.Nil(t, err)
// create 5 temporary config files
files := make([]string, 5)
// set the includes
xmlConfig.IncludeFrom = includesPath
for i := 0; i < 5; i++ {
// we want to check hierarchies are preserved so create a simple folder for each file
fileDir := path.Join(confDir, fmt.Sprintf("%d", i))
err := os.MkdirAll(fileDir, os.ModePerm)
require.Nil(t, err)
filepath := path.Join(fileDir, fmt.Sprintf("random-%d.xml", i))
files[i] = filepath
xmlFile, err := os.Create(filepath)
require.Nil(t, err)
// write a little XML so it's valid
xmlWriter := io.Writer(xmlFile)
enc := xml.NewEncoder(xmlWriter)
enc.Indent(" ", " ")
err = enc.Encode(xmlConfig)
require.Nil(t, err)
}
configFrame, errs := data.NewConfigFileFrame(confDir)
require.Empty(t, errs)
fileBundles := map[string]*data.DiagnosticBundle{
"systemA": {
Frames: map[string]data.Frame{
"disk": diskFrame,
"cluster": clusterFrame,
},
},
"config": {
Frames: map[string]data.Frame{
"user_specified": configFrame,
},
},
}
destDir := t.TempDir()
configuration := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Param: config.NewParam("directory", "A directory", true),
Value: destDir,
},
// turn compression off as the folder will be deleted by default
config.BoolParam{
Value: true,
Param: config.NewParam("skip_archive", "Skip archive", false),
},
},
}
output := file.SimpleOutput{FolderGenerator: staticFolderName}
frameErrors, err := output.Write("test", fileBundles, configuration)
require.Nil(t, err)
require.NotNil(t, frameErrors)
require.Empty(t, frameErrors.Errors)
//test our config frame
for _, filepath := range files {
// check they were copied
subPath := strings.TrimPrefix(filepath, confDir)
// path here will be <destDir>/<id>/<folder>/config/user_specified/<sub path>
newPath := path.Join(destDir, "test", "test", "config", "user_specified", subPath)
require.FileExists(t, newPath)
}
// check our includes file exists
// path here will be <destDir>/<id>/<folder>/config/user_specified/includes/<includes path>
require.FileExists(t, path.Join(destDir, "test", "test", "config", "user_specified", "includes", includesPath))
resetFrames()
})
t.Run("test support for file frames", func(t *testing.T) {
// create 5 temporary files
tempDir := t.TempDir()
files := createRandomFiles(tempDir, 5)
fileFrame := data.NewFileFrame("collection", files)
fileBundles := map[string]*data.DiagnosticBundle{
"systemA": {
Frames: map[string]data.Frame{
"disk": diskFrame,
"cluster": clusterFrame,
},
},
"file": {
Frames: map[string]data.Frame{
"collection": fileFrame,
},
},
}
destDir := t.TempDir()
configuration := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Param: config.NewParam("directory", "A directory", true),
Value: destDir,
},
// turn compression off as the folder will be deleted by default
config.BoolParam{
Value: true,
Param: config.NewParam("skip_archive", "Skip archive", false),
},
},
}
output := file.SimpleOutput{FolderGenerator: staticFolderName}
frameErrors, err := output.Write("test", fileBundles, configuration)
require.Nil(t, err)
require.NotNil(t, frameErrors)
//test our directory frame
for _, filepath := range files {
// path here will be <destDir>/<id>/<folder>/file/collection/<file path>
newPath := path.Join(destDir, "test", "test", "file", "collection", filepath)
require.FileExists(t, newPath)
}
resetFrames()
})
t.Run("test support for hierarchical frames", func(t *testing.T) {
bottomFrame := data.NewHierarchicalFrame("bottomLevel", userFrame, []data.HierarchicalFrame{})
middleFrame := data.NewHierarchicalFrame("middleLevel", diskFrame, []data.HierarchicalFrame{bottomFrame})
topFrame := data.NewHierarchicalFrame("topLevel", clusterFrame, []data.HierarchicalFrame{middleFrame})
tempDir := t.TempDir()
configuration := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Param: config.NewParam("directory", "A directory", true),
Value: tempDir,
},
// turn compression off as the folder will be deleted by default
config.BoolParam{
Value: true,
Param: config.NewParam("skip_archive", "Skip archive", false),
},
},
}
output := file.SimpleOutput{FolderGenerator: staticFolderName}
hierarchicalBundle := map[string]*data.DiagnosticBundle{
"systemA": {
Frames: map[string]data.Frame{
"topLevel": topFrame,
},
},
}
frameErrors, err := output.Write("test", hierarchicalBundle, configuration)
require.Nil(t, err)
require.Equal(t, data.FrameErrors{}, frameErrors)
topFile := path.Join(tempDir, "test", "test", "systemA", "topLevel.csv")
middleFile := path.Join(tempDir, "test", "test", "systemA", "middleLevel", "middleLevel.csv")
bottomFile := path.Join(tempDir, "test", "test", "systemA", "middleLevel", "bottomLevel", "bottomLevel.csv")
require.FileExists(t, topFile)
require.FileExists(t, middleFile)
require.FileExists(t, bottomFile)
topLines, err := readFileLines(topFile)
require.Nil(t, err)
require.Len(t, topLines, 4)
middleLines, err := readFileLines(middleFile)
require.Nil(t, err)
require.Len(t, middleLines, 2)
bottomLines, err := readFileLines(bottomFile)
require.Nil(t, err)
require.Len(t, bottomLines, 2)
require.Equal(t, strings.Join(clusterFrame.ColumnNames, ","), topLines[0])
require.Equal(t, "events,1,1,1,dalem-local-clickhouse-blue-1,192.168.144.2,9000,1,default,,0,0,0", topLines[1])
require.Equal(t, "events,2,1,1,dalem-local-clickhouse-blue-2,192.168.144.4,9001,1,default,,0,0,0", topLines[2])
require.Equal(t, "events,3,1,1,dalem-local-clickhouse-blue-3,192.168.144.3,9002,1,default,,0,0,0", topLines[3])
resetFrames()
})
}
func createRandomFiles(tempDir string, num int) []string {
files := make([]string, num)
for i := 0; i < num; i++ {
// we want to check hierarchies are preserved so create a simple folder for each file
fileDir := path.Join(tempDir, fmt.Sprintf("%d", i))
os.MkdirAll(fileDir, os.ModePerm) //nolint:errcheck
filepath := path.Join(fileDir, fmt.Sprintf("random-%d.log", i))
files[i] = filepath
os.Create(filepath) //nolint:errcheck
}
return files
}
func resetFrames() {
clusterFrame.Reset()
userFrame.Reset()
diskFrame.Reset()
}
func readFileLines(filename string) ([]string, error) {
file, err := os.Open(filename)
if err != nil {
return nil, err
}
defer file.Close()
var lines []string
scanner := bufio.NewScanner(file)
for scanner.Scan() {
lines = append(lines, scanner.Text())
}
return lines, scanner.Err()
}
func staticFolderName() string {
return "test"
}
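
staticFolderName above pins the otherwise-variable output folder so the assertions can use fixed paths. A sketch of the same injection point outside tests, assuming FolderGenerator is a plain func() string as its use here implies:

// one timestamped folder per run instead of the fixed "test" used above
out := file.SimpleOutput{FolderGenerator: func() string {
	return time.Now().Format("2006-01-02T15-04-05")
}}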

View File

@ -1,67 +0,0 @@
package outputs
import (
"fmt"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
type Output interface {
Write(id string, bundles map[string]*data.DiagnosticBundle, config config.Configuration) (data.FrameErrors, error)
Configuration() config.Configuration
Description() string
// TODO: we will need to implement this for the convert function
//Read(config config.Configuration) (data.DiagnosticBundle, error)
}
// Register can be called from init() on an output in this package
// It will automatically be added to the Outputs map to be called externally
func Register(name string, output OutputFactory) {
// names must be unique
if _, ok := Outputs[name]; ok {
log.Error().Msgf("More than 1 output is trying to register under the name %s. Names must be unique.", name)
}
Outputs[name] = output
}
// OutputFactory lets us use a closure to get instances of the output struct
type OutputFactory func() (Output, error)
var Outputs = map[string]OutputFactory{}
func GetOutputNames() []string {
outputs := make([]string, len(Outputs))
i := 0
for k := range Outputs {
outputs[i] = k
i++
}
return outputs
}
func GetOutputByName(name string) (Output, error) {
if outputFactory, ok := Outputs[name]; ok {
// construct the output via its registered factory
output, err := outputFactory()
if err != nil {
return nil, errors.Wrapf(err, "output %s could not be initialized", name)
}
return output, nil
}
return nil, fmt.Errorf("%s is not a valid output name", name)
}
func BuildConfigurationOptions() (map[string]config.Configuration, error) {
configurations := make(map[string]config.Configuration)
for name, collectorFactory := range Outputs {
output, err := collectorFactory()
if err != nil {
return nil, errors.Wrapf(err, "output %s could not be initialized", name)
}
configurations[name] = output.Configuration()
}
return configurations, nil
}
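
A minimal sketch of a third output plugging into this registry - the package name and behaviour are hypothetical, but the interface and Register call follow the definitions above:

package noop

import (
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
)

type NoopOutput struct{}

func (n NoopOutput) Write(id string, bundles map[string]*data.DiagnosticBundle, conf config.Configuration) (data.FrameErrors, error) {
	// discard everything - useful for benchmarking collectors in isolation
	return data.FrameErrors{}, nil
}

func (n NoopOutput) Configuration() config.Configuration {
	return config.Configuration{}
}

func (n NoopOutput) Description() string {
	return "Discards the diagnostic bundle."
}

func init() {
	outputs.Register("noop", func() (outputs.Output, error) {
		return NoopOutput{}, nil
	})
}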

View File

@ -1,45 +0,0 @@
package outputs_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/file"
_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/terminal"
"github.com/stretchr/testify/require"
)
func TestGetOutputNames(t *testing.T) {
t.Run("can get all output names", func(t *testing.T) {
outputNames := outputs.GetOutputNames()
require.ElementsMatch(t, []string{"simple", "report"}, outputNames)
})
}
func TestGetOutputByName(t *testing.T) {
t.Run("can get output by name", func(t *testing.T) {
output, err := outputs.GetOutputByName("simple")
require.Nil(t, err)
require.Equal(t, file.SimpleOutput{}, output)
})
t.Run("fails on non existing output", func(t *testing.T) {
output, err := outputs.GetOutputByName("random")
require.NotNil(t, err)
require.Equal(t, "random is not a valid output name", err.Error())
require.Nil(t, output)
})
}
func TestBuildConfigurationOptions(t *testing.T) {
t.Run("can get all output configurations", func(t *testing.T) {
outputs, err := outputs.BuildConfigurationOptions()
require.Nil(t, err)
require.Len(t, outputs, 2)
require.Contains(t, outputs, "simple")
require.Contains(t, outputs, "report")
})
}

View File

@ -1,284 +0,0 @@
package terminal
import (
"bufio"
"fmt"
"os"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/olekukonko/tablewriter"
"github.com/pkg/errors"
)
const OutputName = "report"
type ReportOutput struct {
}
func (r ReportOutput) Write(id string, bundles map[string]*data.DiagnosticBundle, conf config.Configuration) (data.FrameErrors, error) {
conf, err := conf.ValidateConfig(r.Configuration())
if err != nil {
return data.FrameErrors{}, err
}
format, err := config.ReadStringOptionsValue(conf, "format")
if err != nil {
return data.FrameErrors{}, err
}
nonInteractive, err := config.ReadBoolValue(conf, "continue")
if err != nil {
return data.FrameErrors{}, err
}
maxRows, err := config.ReadIntValue(conf, "row_limit")
if err != nil {
return data.FrameErrors{}, err
}
maxColumns, err := config.ReadIntValue(conf, "column_limit")
if err != nil {
return data.FrameErrors{}, err
}
frameErrors := data.FrameErrors{}
for name := range bundles {
frameError := printDiagnosticBundle(name, bundles[name], format, !nonInteractive, int(maxRows), int(maxColumns))
frameErrors.Errors = append(frameErrors.Errors, frameError.Errors...)
}
return frameErrors, nil
}
func printDiagnosticBundle(name string, diag *data.DiagnosticBundle, format string, interactive bool, maxRows, maxColumns int) data.FrameErrors {
frameErrors := data.FrameErrors{}
for frameId, frame := range diag.Frames {
printFrameHeader(fmt.Sprintf("%s.%s", name, frameId))
err := printFrame(frame, format, maxRows, maxColumns)
if err != nil {
frameErrors.Errors = append(frameErrors.Errors, err)
}
if interactive {
err := waitForEnter()
if err != nil {
frameErrors.Errors = append(frameErrors.Errors, err)
}
}
}
return frameErrors
}
func waitForEnter() error {
fmt.Println("Press the Enter Key to view the next frame report")
for {
consoleReader := bufio.NewReaderSize(os.Stdin, 1)
input, err := consoleReader.ReadByte()
if err != nil {
return errors.New("Unable to read user input")
}
if input == 3 {
// ctrl+c
fmt.Println("Exiting...")
os.Exit(0)
}
if input == 10 {
return nil
}
}
}
func printFrame(frame data.Frame, format string, maxRows, maxColumns int) error {
switch f := frame.(type) {
case data.DatabaseFrame:
return printDatabaseFrame(f, format, maxRows, maxColumns)
case data.ConfigFileFrame:
return printConfigFrame(f, format)
case data.DirectoryFileFrame:
return printDirectoryFileFrame(f, format, maxRows)
case data.HierarchicalFrame:
return printHierarchicalFrame(f, format, maxRows, maxColumns)
default:
// for now our data frame writer supports all frames
return printDatabaseFrame(f, format, maxRows, maxColumns)
}
}
func createTable(format string) *tablewriter.Table {
table := tablewriter.NewWriter(os.Stdout)
if format == "markdown" {
table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
table.SetCenterSeparator("|")
}
return table
}
func printFrameHeader(title string) {
titleTable := tablewriter.NewWriter(os.Stdout)
titleTable.SetHeader([]string{title})
titleTable.SetAutoWrapText(false)
titleTable.SetAutoFormatHeaders(true)
titleTable.SetHeaderAlignment(tablewriter.ALIGN_CENTER)
titleTable.SetRowSeparator("\n")
titleTable.SetHeaderLine(false)
titleTable.SetBorder(false)
titleTable.SetTablePadding("\t") // pad with tabs
titleTable.SetNoWhiteSpace(true)
titleTable.Render()
}
func printHierarchicalFrame(frame data.HierarchicalFrame, format string, maxRows, maxColumns int) error {
err := printDatabaseFrame(frame, format, maxRows, maxColumns)
if err != nil {
return err
}
for _, subFrame := range frame.SubFrames {
err = printHierarchicalFrame(subFrame, format, maxRows, maxColumns)
if err != nil {
return err
}
}
return nil
}
func printDatabaseFrame(frame data.Frame, format string, maxRows, maxColumns int) error {
table := createTable(format)
table.SetAutoWrapText(false)
columns := len(frame.Columns())
if maxColumns > 0 && maxColumns < columns {
columns = maxColumns
}
table.SetHeader(frame.Columns()[:columns])
r := 0
trunColumns := 0
for {
values, ok, err := frame.Next()
if !ok || r == maxRows {
table.Render()
if trunColumns > 0 {
warning(fmt.Sprintf("Truncated %d columns, more available...", trunColumns))
}
if r == maxRows {
warning("Truncated rows, more available...")
}
return err
}
if err != nil {
return err
}
columns := len(values)
// -1 means unlimited
if maxColumns > 0 && maxColumns < columns {
trunColumns = columns - maxColumns
columns = maxColumns
}
row := make([]string, columns)
for i, value := range values {
if i == columns {
break
}
row[i] = fmt.Sprintf("%v", value)
}
table.Append(row)
r++
}
}
// currently we dump the whole config - useless in parts
func printConfigFrame(frame data.Frame, format string) error {
for {
values, ok, err := frame.Next()
if !ok {
return err
}
if err != nil {
return err
}
configFile := values[0].(data.File)
dat, err := os.ReadFile(configFile.FilePath())
if err != nil {
return err
}
// create a table per row - as each will be a file
table := createTable(format)
table.SetAutoWrapText(false)
table.SetAutoFormatHeaders(false)
table.ClearRows()
table.SetHeader([]string{configFile.FilePath()})
table.Append([]string{string(dat)})
table.Render()
}
}
func printDirectoryFileFrame(frame data.Frame, format string, maxRows int) error {
for {
values, ok, err := frame.Next()
if !ok {
return err
}
if err != nil {
return err
}
path := values[0].(data.SimpleFile)
file, err := os.Open(path.FilePath())
if err != nil {
// failure on one file causes rest to be ignored in frame...we could improve this
return errors.Wrapf(err, "Unable to read file %s", path.FilePath())
}
scanner := bufio.NewScanner(file)
i := 0
// create a table per row - as each will be a file
table := createTable(format)
table.SetAutoWrapText(false)
table.SetAutoFormatHeaders(false)
table.ClearRows()
table.SetHeader([]string{path.FilePath()})
rendered := false
for scanner.Scan() {
if i == maxRows {
fmt.Println()
table.Render()
rendered = true
warning("Truncated lines, more available...")
fmt.Print("\n")
break
}
table.Append([]string{scanner.Text()})
i++
}
// render files shorter than maxRows and release the handle
if !rendered {
table.Render()
}
file.Close() //nolint:errcheck
}
}
// prints a warning
func warning(s string) {
fmt.Printf("\x1b[%dm%v\x1b[0m%s\n", 33, "WARNING: ", s)
}
func (r ReportOutput) Configuration() config.Configuration {
return config.Configuration{
Params: []config.ConfigParam{
config.StringOptions{
Value: "default",
Options: []string{"default", "markdown"},
Param: config.NewParam("format", "Format of tables. Default is terminal friendly.", false),
},
config.BoolParam{
Value: false,
Param: config.NewParam("continue", "Print report with no interaction", false),
},
config.IntParam{
Value: 10,
Param: config.NewParam("row_limit", "Max Rows to print per frame.", false),
},
config.IntParam{
Value: 8,
Param: config.NewParam("column_limit", "Max Columns to print per frame. Negative is unlimited.", false),
},
},
}
}
func (r ReportOutput) Description() string {
return "Writes out the diagnostic bundle to the terminal as a simple report."
}
// here we register the output for use
func init() {
outputs.Register(OutputName, func() (outputs.Output, error) {
return ReportOutput{}, nil
})
}
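
A sketch of driving this output non-interactively, assuming bundles was assembled by the collectors elsewhere:

report, err := outputs.GetOutputByName("report")
if err != nil {
	log.Fatal(err)
}
conf := config.Configuration{
	Params: []config.ConfigParam{
		// suppress the per-frame "Press the Enter Key" prompt
		config.BoolParam{
			Value: true,
			Param: config.NewParam("continue", "Print report with no interaction", false),
		},
	},
}
_, err = report.Write("report", bundles, conf)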

View File

@ -1,129 +0,0 @@
package config
import (
"fmt"
"strings"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
)
type ConfigParam interface {
Name() string
Required() bool
Description() string
validate(defaultConfig ConfigParam) error
}
type Configuration struct {
Params []ConfigParam
}
type Param struct {
name string
description string
required bool
}
func NewParam(name string, description string, required bool) Param {
return Param{
name: name,
description: description,
required: required,
}
}
func (bp Param) Name() string {
return bp.name
}
func (bp Param) Required() bool {
return bp.required
}
func (bp Param) Description() string {
return bp.description
}
func (bp Param) validate(defaultConfig ConfigParam) error {
return nil
}
func (c Configuration) GetConfigParam(paramName string) (ConfigParam, error) {
for _, param := range c.Params {
if param.Name() == paramName {
return param, nil
}
}
return nil, fmt.Errorf("%s does not exist", paramName)
}
// ValidateConfig finds the intersection of a config c and a default config. Requires all possible params to be in default.
func (c Configuration) ValidateConfig(defaultConfig Configuration) (Configuration, error) {
var finalParams []ConfigParam
for _, defaultParam := range defaultConfig.Params {
setParam, err := c.GetConfigParam(defaultParam.Name())
if err == nil {
// check the set value is valid
if err := setParam.validate(defaultParam); err != nil {
return Configuration{}, fmt.Errorf("parameter %s is invalid - %s", defaultParam.Name(), err.Error())
}
finalParams = append(finalParams, setParam)
} else if defaultParam.Required() {
return Configuration{}, fmt.Errorf("missing required parameter %s - %s", defaultParam.Name(), err.Error())
} else {
finalParams = append(finalParams, defaultParam)
}
}
return Configuration{
Params: finalParams,
}, nil
}
type StringParam struct {
Param
Value string
AllowEmpty bool
}
func (sp StringParam) validate(defaultConfig ConfigParam) error {
dsp := defaultConfig.(StringParam)
if !dsp.AllowEmpty && strings.TrimSpace(sp.Value) == "" {
return fmt.Errorf("%s cannot be empty", sp.Name())
}
// if the parameter is not required it doesn't matter
return nil
}
type StringListParam struct {
Param
Values []string
}
type StringOptions struct {
Param
Options []string
Value string
AllowEmpty bool
}
func (so StringOptions) validate(defaultConfig ConfigParam) error {
dso := defaultConfig.(StringOptions)
if !dso.AllowEmpty && strings.TrimSpace(so.Value) == "" {
return fmt.Errorf("%s cannot be empty", so.Name())
}
if !utils.Contains(dso.Options, so.Value) {
return fmt.Errorf("%s is not a valid value for %s - %v", so.Value, so.Name(), so.Options)
}
// if the parameter is not required it doesn't matter
return nil
}
type IntParam struct {
Param
Value int64
}
type BoolParam struct {
Param
Value bool
}
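
A short sketch of the override-and-merge flow these types enable: the caller supplies only the params it wants to change and ValidateConfig fills in the remaining defaults (the param names here are illustrative):

defaults := config.Configuration{
	Params: []config.ConfigParam{
		config.StringParam{
			Value: "./",
			Param: config.NewParam("directory", "Output directory", false),
		},
		config.IntParam{
			Value: 10,
			Param: config.NewParam("row_limit", "Max rows", false),
		},
	},
}
custom := config.Configuration{
	Params: []config.ConfigParam{
		config.StringParam{
			Value: "/tmp/diag",
			Param: config.NewParam("directory", "Output directory", false),
		},
	},
}
merged, err := custom.ValidateConfig(defaults)
// merged now holds directory=/tmp/diag plus the default row_limit=10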

View File

@ -1,182 +0,0 @@
package config_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/stretchr/testify/require"
)
var conf = config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
Values: []string{"some", "values"},
Param: config.NewParam("paramA", "", false),
},
config.StringParam{
Value: "random",
Param: config.NewParam("paramB", "", true),
},
config.StringParam{
Value: "",
AllowEmpty: true,
Param: config.NewParam("paramC", "", false),
},
config.StringOptions{
Value: "random",
Options: []string{"random", "very_random", "very_very_random"},
Param: config.NewParam("paramD", "", false),
AllowEmpty: true,
},
},
}
func TestGetConfigParam(t *testing.T) {
t.Run("can find get config param by name", func(t *testing.T) {
paramA, err := conf.GetConfigParam("paramA")
require.Nil(t, err)
require.NotNil(t, paramA)
require.IsType(t, config.StringListParam{}, paramA)
stringListParam, ok := paramA.(config.StringListParam)
require.True(t, ok)
require.False(t, stringListParam.Required())
require.Equal(t, stringListParam.Name(), "paramA")
require.ElementsMatch(t, stringListParam.Values, []string{"some", "values"})
})
t.Run("throws error on missing element", func(t *testing.T) {
paramZ, err := conf.GetConfigParam("paramZ")
require.Nil(t, paramZ)
require.NotNil(t, err)
require.Equal(t, err.Error(), "paramZ does not exist")
})
}
func TestValidateConfig(t *testing.T) {
t.Run("validate adds the default and allows override", func(t *testing.T) {
customConf := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "custom",
Param: config.NewParam("paramB", "", true),
},
},
}
newConf, err := customConf.ValidateConfig(conf)
require.Nil(t, err)
require.NotNil(t, newConf)
require.Len(t, newConf.Params, 4)
// check first param
require.IsType(t, config.StringListParam{}, newConf.Params[0])
stringListParam, ok := newConf.Params[0].(config.StringListParam)
require.True(t, ok)
require.False(t, stringListParam.Required())
require.Equal(t, stringListParam.Name(), "paramA")
require.ElementsMatch(t, stringListParam.Values, []string{"some", "values"})
// check second param
require.IsType(t, config.StringParam{}, newConf.Params[1])
stringParam, ok := newConf.Params[1].(config.StringParam)
require.True(t, ok)
require.True(t, stringParam.Required())
require.Equal(t, "paramB", stringParam.Name())
require.Equal(t, "custom", stringParam.Value)
})
t.Run("validate errors if missing param", func(t *testing.T) {
//missing required paramB
customConf := config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
Values: []string{"some", "values"},
Param: config.NewParam("paramA", "", false),
},
},
}
newConf, err := customConf.ValidateConfig(conf)
require.Nil(t, newConf.Params)
require.NotNil(t, err)
require.Equal(t, "missing required parameter paramB - paramB does not exist", err.Error())
})
t.Run("validate errors if invalid string value", func(t *testing.T) {
// required paramB set to an empty string
customConf := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "",
Param: config.NewParam("paramB", "", true),
},
},
}
newConf, err := customConf.ValidateConfig(conf)
require.Nil(t, newConf.Params)
require.NotNil(t, err)
require.Equal(t, "parameter paramB is invalid - paramB cannot be empty", err.Error())
})
t.Run("allow empty string value if specified", func(t *testing.T) {
// paramC allows empty values
customConf := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "",
Param: config.NewParam("paramC", "", true),
},
config.StringParam{
Value: "custom",
Param: config.NewParam("paramB", "", true),
},
},
}
newConf, err := customConf.ValidateConfig(conf)
require.NotNil(t, newConf.Params)
require.Nil(t, err)
})
t.Run("validate errors if invalid string options value", func(t *testing.T) {
// paramD set to a value outside its options
customConf := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "not_random",
Param: config.NewParam("paramB", "", true),
},
config.StringOptions{
Value: "custom",
// this isn't ideal we need to ensure options are set for this to validate correctly
Options: []string{"random", "very_random", "very_very_random"},
Param: config.NewParam("paramD", "", true),
},
},
}
newConf, err := customConf.ValidateConfig(conf)
require.Nil(t, newConf.Params)
require.NotNil(t, err)
require.Equal(t, "parameter paramD is invalid - custom is not a valid value for paramD - [random very_random very_very_random]", err.Error())
})
t.Run("allow empty string value for StringOptions if specified", func(t *testing.T) {
// paramD allows an empty value
customConf := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: "custom",
Param: config.NewParam("paramB", "", true),
},
config.StringOptions{
Param: config.Param{},
// this isn't ideal we need to ensure options are set for this to validate correctly
Options: []string{"random", "very_random", "very_very_random"},
Value: "",
},
},
}
newConf, err := customConf.ValidateConfig(conf)
require.NotNil(t, newConf.Params)
require.Nil(t, err)
})
//TODO: Do we need to test if parameters of the same name but wrong type are passed??
}

View File

@ -1,74 +0,0 @@
package config
import (
"fmt"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
)
func ReadStringListValues(conf Configuration, paramName string) ([]string, error) {
param, err := conf.GetConfigParam(paramName)
if err != nil {
return nil, err
}
value, ok := param.(StringListParam)
if !ok {
return nil, fmt.Errorf("%s must be a list of strings", paramName)
}
return value.Values, nil
}
func ReadStringValue(conf Configuration, paramName string) (string, error) {
param, err := conf.GetConfigParam(paramName)
if err != nil {
return "", err
}
value, ok := param.(StringParam)
if !ok {
return "", fmt.Errorf("%s must be a list of strings", paramName)
}
return value.Value, nil
}
func ReadIntValue(conf Configuration, paramName string) (int64, error) {
param, err := conf.GetConfigParam(paramName)
if err != nil {
return 0, err
}
value, ok := param.(IntParam)
if !ok {
return 0, fmt.Errorf("%s must be an integer", paramName)
}
return value.Value, nil
}
func ReadBoolValue(conf Configuration, paramName string) (bool, error) {
param, err := conf.GetConfigParam(paramName)
if err != nil {
return false, err
}
value, ok := param.(BoolParam)
if !ok {
return false, fmt.Errorf("%s must be a boolean", paramName)
}
return value.Value, nil
}
func ReadStringOptionsValue(conf Configuration, paramName string) (string, error) {
param, err := conf.GetConfigParam(paramName)
if err != nil {
return "", err
}
value, ok := param.(StringOptions)
if !ok {
return "", fmt.Errorf("%s must be a string options", paramName)
}
if !utils.Contains(value.Options, value.Value) {
return "", fmt.Errorf("%s is not a valid option in %v for the the parameter %s", value.Value, value.Options, paramName)
}
return value.Value, nil
}

View File

@ -1,142 +0,0 @@
package config_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/stretchr/testify/require"
)
func TestReadStringListValues(t *testing.T) {
t.Run("can find a string list param", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
// nil means include everything
Values: nil,
Param: config.NewParam("include_tables", "Specify list of tables to collect", false),
},
config.StringListParam{
Values: []string{"licenses", "settings"},
Param: config.NewParam("exclude_tables", "Specify list of tables not to collect", false),
},
},
}
excludeTables, err := config.ReadStringListValues(conf, "exclude_tables")
require.Nil(t, err)
require.Equal(t, []string{"licenses", "settings"}, excludeTables)
})
}
func TestReadStringValue(t *testing.T) {
t.Run("can find a string param", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringListParam{
// nil means include everything
Values: nil,
Param: config.NewParam("include_tables", "Specify list of tables to collect", false),
},
config.StringParam{
Value: "/tmp/dump",
Param: config.NewParam("directory", "Specify a directory", false),
},
},
}
directory, err := config.ReadStringValue(conf, "directory")
require.Nil(t, err)
require.Equal(t, "/tmp/dump", directory)
})
}
func TestReadIntValue(t *testing.T) {
t.Run("can find an integer param", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.IntParam{
Value: 10000,
Param: config.NewParam("row_limit", "Max Rows to collect", false),
},
config.StringListParam{
// nil means include everything
Values: nil,
Param: config.NewParam("include_tables", "Specify list of tables to collect", false),
},
config.StringParam{
Value: "/tmp/dump",
Param: config.NewParam("directory", "Specify a directory", false),
},
},
}
rowLimit, err := config.ReadIntValue(conf, "row_limit")
require.Nil(t, err)
require.Equal(t, int64(10000), rowLimit)
})
}
func TestReadBoolValue(t *testing.T) {
t.Run("can find a boolean param", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.BoolParam{
Value: true,
Param: config.NewParam("compress", "Compress data", false),
},
config.StringListParam{
// nil means include everything
Values: nil,
Param: config.NewParam("include_tables", "Specify list of tables to collect", false),
},
config.StringParam{
Value: "/tmp/dump",
Param: config.NewParam("directory", "Specify a directory", false),
},
},
}
compress, err := config.ReadBoolValue(conf, "compress")
require.Nil(t, err)
require.True(t, compress)
})
}
func TestReadStringOptionsValue(t *testing.T) {
t.Run("can find a string value in a list of options", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringOptions{
Param: config.NewParam("format", "List of formats", false),
Options: []string{"csv", "tsv", "binary", "json", "ndjson"},
Value: "csv",
AllowEmpty: false,
},
},
}
format, err := config.ReadStringOptionsValue(conf, "format")
require.Nil(t, err)
require.Equal(t, "csv", format)
})
t.Run("errors on invalid value", func(t *testing.T) {
conf := config.Configuration{
Params: []config.ConfigParam{
config.StringOptions{
Param: config.NewParam("format", "List of formats", false),
Options: []string{"csv", "tsv", "binary", "json", "ndjson"},
Value: "random",
AllowEmpty: false,
},
},
}
format, err := config.ReadStringOptionsValue(conf, "format")
require.Equal(t, "random is not a valid option in [csv tsv binary json ndjson] for the the parameter format", err.Error())
require.Equal(t, "", format)
})
}

View File

@ -1,27 +0,0 @@
package data
import (
"strings"
)
// DiagnosticBundle contains the results from a Collector
// each frame can represent a table or collection of data files. By allowing multiple frames a single DiagnosticBundle
// can potentially contain many related tables
type DiagnosticBundle struct {
Frames map[string]Frame
// Errors is a property to be set if the Collector has an error. This can be used to indicate a partial collection
// and failed frames
Errors FrameErrors
}
type FrameErrors struct {
Errors []error
}
func (fe *FrameErrors) Error() string {
errors := make([]string, len(fe.Errors))
for i := range errors {
errors[i] = fe.Errors[i].Error()
}
return strings.Join(errors, "\n")
}

View File

@ -1,26 +0,0 @@
package data_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
)
func TestBundleError(t *testing.T) {
t.Run("can get a bundle error", func(t *testing.T) {
errs := make([]error, 3)
errs[0] = errors.New("Error 1")
errs[1] = errors.New("Error 2")
errs[2] = errors.New("Error 3")
fErrors := data.FrameErrors{
Errors: errs,
}
require.Equal(t, `Error 1
Error 2
Error 3`, fErrors.Error())
})
}

View File

@ -1,88 +0,0 @@
package data
import (
"database/sql"
"fmt"
"reflect"
"strings"
)
type DatabaseFrame struct {
name string
ColumnNames []string
rows *sql.Rows
columnTypes []*sql.ColumnType
vars []interface{}
}
func NewDatabaseFrame(name string, rows *sql.Rows) (DatabaseFrame, error) {
databaseFrame := DatabaseFrame{}
columnTypes, err := rows.ColumnTypes()
if err != nil {
return DatabaseFrame{}, err
}
databaseFrame.columnTypes = columnTypes
databaseFrame.name = name
vars := make([]interface{}, len(columnTypes))
columnNames := make([]string, len(columnTypes))
for i := range columnTypes {
value := reflect.Zero(columnTypes[i].ScanType()).Interface()
vars[i] = &value
columnNames[i] = columnTypes[i].Name()
}
databaseFrame.ColumnNames = columnNames
databaseFrame.vars = vars
databaseFrame.rows = rows
return databaseFrame, nil
}
func (f DatabaseFrame) Next() ([]interface{}, bool, error) {
values := make([]interface{}, len(f.columnTypes))
for f.rows.Next() {
if err := f.rows.Scan(f.vars...); err != nil {
return nil, false, err
}
for i := range f.columnTypes {
ptr := reflect.ValueOf(f.vars[i])
values[i] = ptr.Elem().Interface()
}
return values, true, nil //nolint
}
// TODO: raise issue as this seems to always raise an error
//err := f.rows.Err()
f.rows.Close()
return nil, false, nil
}
func (f DatabaseFrame) Columns() []string {
return f.ColumnNames
}
func (f DatabaseFrame) Name() string {
return f.name
}
type Order int
const (
Asc Order = 1
Desc Order = 2
)
type OrderBy struct {
Column string
Order Order
}
func (o OrderBy) String() string {
if strings.TrimSpace(o.Column) == "" {
return ""
}
switch o.Order {
case Asc:
return fmt.Sprintf(" ORDER BY %s ASC", o.Column)
case Desc:
return fmt.Sprintf(" ORDER BY %s DESC", o.Column)
}
return ""
}
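
OrderBy renders directly into the SQL the collectors build; a minimal sketch, where db is an assumed *sql.DB already connected to ClickHouse:

orderBy := data.OrderBy{Column: "event_time", Order: data.Desc}
rows, err := db.Query("SELECT * FROM system.query_log" + orderBy.String())
if err != nil {
	log.Fatal(err)
}
frame, err := data.NewDatabaseFrame("query_log", rows)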

View File

@ -1,86 +0,0 @@
package data_test
import (
"database/sql"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/DATA-DOG/go-sqlmock"
"github.com/stretchr/testify/require"
)
func TestString(t *testing.T) {
t.Run("can order by asc", func(t *testing.T) {
orderBy := data.OrderBy{
Column: "created_at",
Order: data.Asc,
}
require.Equal(t, " ORDER BY created_at ASC", orderBy.String())
})
t.Run("can order by desc", func(t *testing.T) {
orderBy := data.OrderBy{
Column: "created_at",
Order: data.Desc,
}
require.Equal(t, " ORDER BY created_at DESC", orderBy.String())
})
}
func TestNextDatabaseFrame(t *testing.T) {
t.Run("can iterate sql rows", func(t *testing.T) {
rowValues := [][]interface{}{
{int64(1), "post_1", "hello"},
{int64(2), "post_2", "world"},
{int64(3), "post_3", "goodbye"},
{int64(4), "post_4", "world"},
}
mockRows := sqlmock.NewRows([]string{"id", "title", "body"})
for i := range rowValues {
mockRows.AddRow(rowValues[i][0], rowValues[i][1], rowValues[i][2])
}
rows := mockRowsToSqlRows(mockRows)
dbFrame, err := data.NewDatabaseFrame("test", rows)
require.ElementsMatch(t, dbFrame.Columns(), []string{"id", "title", "body"})
require.Nil(t, err)
i := 0
for {
values, ok, err := dbFrame.Next()
require.Nil(t, err)
if !ok {
break
}
require.Len(t, values, 3)
require.ElementsMatch(t, values, rowValues[i])
i++
}
require.Equal(t, 4, i)
})
t.Run("can iterate empty sql rows", func(t *testing.T) {
mockRows := sqlmock.NewRows([]string{"id", "title", "body"})
rows := mockRowsToSqlRows(mockRows)
dbFrame, err := data.NewDatabaseFrame("test", rows)
require.ElementsMatch(t, dbFrame.Columns(), []string{"id", "title", "body"})
require.Nil(t, err)
i := 0
for {
_, ok, err := dbFrame.Next()
require.Nil(t, err)
if !ok {
break
}
i++
}
require.Equal(t, 0, i)
})
}
func mockRowsToSqlRows(mockRows *sqlmock.Rows) *sql.Rows {
db, mock, _ := sqlmock.New()
mock.ExpectQuery("select").WillReturnRows(mockRows)
rows, _ := db.Query("select")
return rows
}

View File

@ -1,8 +0,0 @@
package data
type Field struct {
// Name of the field
Name string
// A list of fields that must implement FieldType interface
Values []interface{}
}

View File

@ -1,444 +0,0 @@
package data
import (
"bufio"
"encoding/xml"
"io/ioutil"
"os"
"path"
"path/filepath"
"regexp"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/pkg/errors"
"gopkg.in/yaml.v3"
)
type File interface {
Copy(destPath string, removeSensitive bool) error
FilePath() string
}
type SimpleFile struct {
Path string
}
// Copy supports removeSensitive for other file types but for a simple file this doesn't do anything
func (s SimpleFile) Copy(destPath string, removeSensitive bool) error {
// simple copy easiest
if err := utils.CopyFile(s.FilePath(), destPath); err != nil {
return errors.Wrapf(err, "unable to copy file %s", s.FilePath())
}
return nil
}
func (s SimpleFile) FilePath() string {
return s.Path
}
func NewFileFrame(name string, filePaths []string) FileFrame {
i := 0
files := make([]File, len(filePaths))
for i, path := range filePaths {
files[i] = SimpleFile{
Path: path,
}
}
return FileFrame{
name: name,
i: &i,
files: files,
}
}
type FileFrame struct {
name string
i *int
files []File
}
func (f FileFrame) Next() ([]interface{}, bool, error) {
if len(f.files) == *(f.i) {
return nil, false, nil
}
file := f.files[*f.i]
*f.i++
value := make([]interface{}, 1)
value[0] = file
return value, true, nil
}
func (f FileFrame) Columns() []string {
return []string{"files"}
}
func (f FileFrame) Name() string {
return f.name
}
// DirectoryFileFrame represents a set of files under a directory
type DirectoryFileFrame struct {
FileFrame
Directory string
}
func NewFileDirectoryFrame(directory string, exts []string) (DirectoryFileFrame, []error) {
filePaths, errs := utils.ListFilesInDirectory(directory, exts)
files := make([]File, len(filePaths))
for i, path := range filePaths {
files[i] = SimpleFile{
Path: path,
}
}
i := 0
return DirectoryFileFrame{
Directory: directory,
FileFrame: FileFrame{
files: files,
i: &i,
},
}, errs
}
func (f DirectoryFileFrame) Next() ([]interface{}, bool, error) {
if len(f.files) == *(f.i) {
return nil, false, nil
}
file := f.files[*f.i]
*f.i++
value := make([]interface{}, 1)
value[0] = file
return value, true, nil
}
func (f DirectoryFileFrame) Columns() []string {
return []string{"files"}
}
func (f DirectoryFileFrame) Name() string {
return f.Directory
}
type ConfigFile interface {
File
FindLogPaths() ([]string, error)
FindIncludedConfig() (ConfigFile, error)
IsIncluded() bool
}
type ConfigFileFrame struct {
i *int
Directory string
files []ConfigFile
}
func (f ConfigFileFrame) Next() ([]interface{}, bool, error) {
if len(f.files) == *(f.i) {
return nil, false, nil
}
file := f.files[*f.i]
*f.i++
value := make([]interface{}, 1)
value[0] = file
return value, true, nil
}
func (f ConfigFileFrame) Name() string {
return f.Directory
}
func NewConfigFileFrame(directory string) (ConfigFileFrame, []error) {
files, errs := utils.ListFilesInDirectory(directory, []string{"*.xml", "*.yaml", "*.yml"})
// we can't predict the length because of include files
var configs []ConfigFile
for _, path := range files {
var configFile ConfigFile
switch ext := filepath.Ext(path); ext {
case ".xml":
configFile = XmlConfigFile{
Path: path,
Included: false,
}
case ".yml":
configFile = YamlConfigFile{
Path: path,
Included: false,
}
case ".yaml":
configFile = YamlConfigFile{
Path: path,
}
}
if configFile != nil {
configs = append(configs, configFile)
// add any included configs
iConf, err := configFile.FindIncludedConfig()
if err != nil {
errs = append(errs, err)
} else {
if iConf.FilePath() != "" {
configs = append(configs, iConf)
}
}
}
}
i := 0
return ConfigFileFrame{
i: &i,
Directory: directory,
files: configs,
}, errs
}
func (f ConfigFileFrame) Columns() []string {
return []string{"config"}
}
func (f ConfigFileFrame) FindLogPaths() (logPaths []string, errors []error) {
for _, configFile := range f.files {
paths, err := configFile.FindLogPaths()
if err != nil {
errors = append(errors, err)
} else {
logPaths = append(logPaths, paths...)
}
}
return logPaths, errors
}
type XmlConfigFile struct {
Path string
Included bool
}
// these patterns will be used to remove sensitive content - matches of the pattern will be replaced with the key
var xmlSensitivePatterns = map[string]*regexp.Regexp{
"<password>Replaced</password>": regexp.MustCompile(`<password>(.*)</password>`),
"<password_sha256_hex>Replaced</password_sha256_hex>": regexp.MustCompile(`<password_sha256_hex>(.*)</password_sha256_hex>`),
"<secret_access_key>Replaced</secret_access_key>": regexp.MustCompile(`<secret_access_key>(.*)</secret_access_key>`),
"<access_key_id>Replaced</access_key_id>": regexp.MustCompile(`<access_key_id>(.*)</access_key_id>`),
"<secret>Replaced</secret>": regexp.MustCompile(`<secret>(.*)</secret>`),
}
func (x XmlConfigFile) Copy(destPath string, removeSensitive bool) error {
if !removeSensitive {
// simple copy easiest
if err := utils.CopyFile(x.FilePath(), destPath); err != nil {
return errors.Wrapf(err, "unable to copy file %s", x.FilePath())
}
return nil
}
return sensitiveFileCopy(x.FilePath(), destPath, xmlSensitivePatterns)
}
func (x XmlConfigFile) FilePath() string {
return x.Path
}
func (x XmlConfigFile) IsIncluded() bool {
return x.Included
}
type XmlLoggerConfig struct {
XMLName xml.Name `xml:"logger"`
ErrorLog string `xml:"errorlog"`
Log string `xml:"log"`
}
type YandexXMLConfig struct {
XMLName xml.Name `xml:"yandex"`
Clickhouse XmlLoggerConfig `xml:"logger"`
IncludeFrom string `xml:"include_from"`
}
type XmlConfig struct {
XMLName xml.Name `xml:"clickhouse"`
Clickhouse XmlLoggerConfig `xml:"logger"`
IncludeFrom string `xml:"include_from"`
}
func (x XmlConfigFile) UnmarshallConfig() (XmlConfig, error) {
inputFile, err := ioutil.ReadFile(x.Path)
if err != nil {
return XmlConfig{}, err
}
var cConfig XmlConfig
err = xml.Unmarshal(inputFile, &cConfig)
if err == nil {
return XmlConfig{
Clickhouse: cConfig.Clickhouse,
IncludeFrom: cConfig.IncludeFrom,
}, nil
}
// attempt to unmarshal as a legacy yandex config
var yConfig YandexXMLConfig
err = xml.Unmarshal(inputFile, &yConfig)
if err != nil {
return XmlConfig{}, err
}
return XmlConfig{
Clickhouse: yConfig.Clickhouse,
IncludeFrom: yConfig.IncludeFrom,
}, nil
}
func (x XmlConfigFile) FindLogPaths() ([]string, error) {
var paths []string
config, err := x.UnmarshallConfig()
if err != nil {
return nil, err
}
if config.Clickhouse.Log != "" {
paths = append(paths, config.Clickhouse.Log)
}
if config.Clickhouse.ErrorLog != "" {
paths = append(paths, config.Clickhouse.ErrorLog)
}
return paths, nil
}
func (x XmlConfigFile) FindIncludedConfig() (ConfigFile, error) {
if x.Included {
//can't recurse
return XmlConfigFile{}, nil
}
config, err := x.UnmarshallConfig()
if err != nil {
return XmlConfigFile{}, err
}
// we need to convert this
if config.IncludeFrom != "" {
if filepath.IsAbs(config.IncludeFrom) {
return XmlConfigFile{Path: config.IncludeFrom, Included: true}, nil
}
confDir := filepath.Dir(x.FilePath())
return XmlConfigFile{Path: path.Join(confDir, config.IncludeFrom), Included: true}, nil
}
return XmlConfigFile{}, nil
}
type YamlConfigFile struct {
Path string
Included bool
}
var ymlSensitivePatterns = map[string]*regexp.Regexp{
"password: 'Replaced'": regexp.MustCompile(`password:\s*.*$`),
"password_sha256_hex: 'Replaced'": regexp.MustCompile(`password_sha256_hex:\s*.*$`),
"access_key_id: 'Replaced'": regexp.MustCompile(`access_key_id:\s*.*$`),
"secret_access_key: 'Replaced'": regexp.MustCompile(`secret_access_key:\s*.*$`),
"secret: 'Replaced'": regexp.MustCompile(`secret:\s*.*$`),
}
func (y YamlConfigFile) Copy(destPath string, removeSensitive bool) error {
if !removeSensitive {
// simple copy easiest
if err := utils.CopyFile(y.FilePath(), destPath); err != nil {
return errors.Wrapf(err, "unable to copy file %s", y.FilePath())
}
return nil
}
return sensitiveFileCopy(y.FilePath(), destPath, ymlSensitivePatterns)
}
func (y YamlConfigFile) FilePath() string {
return y.Path
}
func (y YamlConfigFile) IsIncluded() bool {
return y.Included
}
type YamlLoggerConfig struct {
Log string
ErrorLog string
}
type YamlConfig struct {
Logger YamlLoggerConfig
Include_From string
}
func (y YamlConfigFile) FindLogPaths() ([]string, error) {
var paths []string
inputFile, err := ioutil.ReadFile(y.Path)
if err != nil {
return nil, err
}
var config YamlConfig
err = yaml.Unmarshal(inputFile, &config)
if err != nil {
return nil, err
}
if config.Logger.Log != "" {
paths = append(paths, config.Logger.Log)
}
if config.Logger.ErrorLog != "" {
paths = append(paths, config.Logger.ErrorLog)
}
return paths, nil
}
func (y YamlConfigFile) FindIncludedConfig() (ConfigFile, error) {
if y.Included {
//can't recurse
return YamlConfigFile{}, nil
}
inputFile, err := ioutil.ReadFile(y.Path)
if err != nil {
return YamlConfigFile{}, err
}
var config YamlConfig
err = yaml.Unmarshal(inputFile, &config)
if err != nil {
return YamlConfigFile{}, err
}
if config.Include_From != "" {
if filepath.IsAbs(config.Include_From) {
return YamlConfigFile{Path: config.Include_From, Included: true}, nil
}
confDir := filepath.Dir(y.FilePath())
return YamlConfigFile{Path: path.Join(confDir, config.Include_From), Included: true}, nil
}
return YamlConfigFile{}, nil
}
func sensitiveFileCopy(sourcePath string, destPath string, patterns map[string]*regexp.Regexp) error {
destDir := filepath.Dir(destPath)
if err := os.MkdirAll(destDir, os.ModePerm); err != nil {
return errors.Wrapf(err, "unable to create directory %s", destDir)
}
// currently, we don't unmarshal into a struct - we want to preserve structure and comments. Possibly this could
// be handled, but for simplicity we do a line-based parse for now
inputFile, err := os.Open(sourcePath)
if err != nil {
return err
}
defer inputFile.Close()
outputFile, err := os.Create(destPath)
if err != nil {
return err
}
defer outputFile.Close()
writer := bufio.NewWriter(outputFile)
scanner := bufio.NewScanner(inputFile)
for scanner.Scan() {
line := scanner.Text()
for repl, pattern := range patterns {
line = pattern.ReplaceAllString(line, repl)
}
_, err = writer.WriteString(line + "\n")
if err != nil {
return err
}
}
writer.Flush()
return nil
}
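
A sketch of the scrubbing path in isolation - the file paths are illustrative, and removeSensitive=true is what routes the copy through sensitiveFileCopy:

src := data.XmlConfigFile{Path: "/etc/clickhouse-server/users.xml"}
// lines matching xmlSensitivePatterns are rewritten, e.g.
//   <password>secret</password>  ->  <password>Replaced</password>
if err := src.Copy("/tmp/bundle/users.xml", true); err != nil {
	log.Fatal(err)
}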

View File

@ -1,263 +0,0 @@
package data_test
import (
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"strings"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/stretchr/testify/require"
)
func TestNextFileDirectoryFrame(t *testing.T) {
t.Run("can iterate file frame", func(t *testing.T) {
tempDir := t.TempDir()
files := make([]string, 5)
for i := 0; i < 5; i++ {
fileDir := path.Join(tempDir, fmt.Sprintf("%d", i))
err := os.MkdirAll(fileDir, os.ModePerm)
require.Nil(t, err)
filepath := path.Join(fileDir, fmt.Sprintf("random-%d.txt", i))
files[i] = filepath
_, err = os.Create(filepath)
require.Nil(t, err)
}
fileFrame, errs := data.NewFileDirectoryFrame(tempDir, []string{"*.txt"})
require.Empty(t, errs)
i := 0
for {
values, ok, err := fileFrame.Next()
require.Nil(t, err)
if !ok {
break
}
require.Len(t, values, 1)
require.Equal(t, files[i], values[0].(data.SimpleFile).Path)
i += 1
}
require.Equal(t, 5, i)
})
t.Run("can iterate file frame when empty", func(t *testing.T) {
// create 5 temporary files
tempDir := t.TempDir()
fileFrame, errs := data.NewFileDirectoryFrame(tempDir, []string{"*"})
require.Empty(t, errs)
i := 0
for {
_, ok, err := fileFrame.Next()
require.Nil(t, err)
if !ok {
break
}
}
require.Equal(t, 0, i)
})
}
func TestNewConfigFileFrame(t *testing.T) {
t.Run("can iterate config file frame", func(t *testing.T) {
cwd, err := os.Getwd()
require.Nil(t, err)
configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "xml"))
require.Empty(t, errs)
i := 0
for {
values, ok, err := configFrame.Next()
require.Nil(t, err)
if !ok {
break
}
require.Len(t, values, 1)
filePath := values[0].(data.XmlConfigFile).FilePath()
require.True(t, strings.Contains(filePath, ".xml"))
i += 1
}
// 5 not 3 due to the includes
require.Equal(t, 5, i)
})
t.Run("can iterate file frame when empty", func(t *testing.T) {
// create 5 temporary files
tempDir := t.TempDir()
configFrame, errs := data.NewConfigFileFrame(tempDir)
require.Empty(t, errs)
i := 0
for {
_, ok, err := configFrame.Next()
require.Nil(t, err)
if !ok {
break
}
}
require.Equal(t, 0, i)
})
}
func TestConfigFileFrameCopy(t *testing.T) {
t.Run("can copy non-sensitive xml config files", func(t *testing.T) {
tmrDir := t.TempDir()
cwd, err := os.Getwd()
require.Nil(t, err)
configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "xml"))
require.Empty(t, errs)
for {
values, ok, err := configFrame.Next()
require.Nil(t, err)
if !ok {
break
}
require.Nil(t, err)
require.True(t, ok)
configFile := values[0].(data.XmlConfigFile)
newPath := path.Join(tmrDir, filepath.Base(configFile.FilePath()))
err = configFile.Copy(newPath, false)
require.FileExists(t, newPath)
sourceInfo, _ := os.Stat(configFile.FilePath())
destInfo, _ := os.Stat(newPath)
require.Equal(t, sourceInfo.Size(), destInfo.Size())
require.Nil(t, err)
}
})
t.Run("can copy sensitive xml config files", func(t *testing.T) {
tmrDir := t.TempDir()
cwd, err := os.Getwd()
require.Nil(t, err)
configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "xml"))
require.Empty(t, errs)
i := 0
var checkedFiles []string
for {
values, ok, err := configFrame.Next()
require.Nil(t, err)
if !ok {
break
}
require.Nil(t, err)
require.True(t, ok)
configFile := values[0].(data.XmlConfigFile)
fileName := filepath.Base(configFile.FilePath())
newPath := path.Join(tmrDir, fileName)
err = configFile.Copy(newPath, true)
require.FileExists(t, newPath)
require.Nil(t, err)
bytes, err := ioutil.ReadFile(newPath)
require.Nil(t, err)
s := string(bytes)
checkedFiles = append(checkedFiles, fileName)
if fileName == "users.xml" || fileName == "default-password.xml" || fileName == "user-include.xml" {
require.True(t, strings.Contains(s, "<password>Replaced</password>") ||
strings.Contains(s, "<password_sha256_hex>Replaced</password_sha256_hex>"))
require.NotContains(t, s, "<password>REPLACE_ME</password>")
require.NotContains(t, s, "<password_sha256_hex>REPLACE_ME</password_sha256_hex>")
} else if fileName == "config.xml" {
require.True(t, strings.Contains(s, "<access_key_id>Replaced</access_key_id>"))
require.True(t, strings.Contains(s, "<secret_access_key>Replaced</secret_access_key>"))
require.True(t, strings.Contains(s, "<secret>Replaced</secret>"))
require.NotContains(t, s, "<access_key_id>REPLACE_ME</access_key_id>")
require.NotContains(t, s, "<secret_access_key>REPLACE_ME</secret_access_key>")
require.NotContains(t, s, "<secret>REPLACE_ME</secret>")
}
i++
}
require.ElementsMatch(t, []string{"users.xml", "default-password.xml", "user-include.xml", "config.xml", "server-include.xml"}, checkedFiles)
require.Equal(t, 5, i)
})
t.Run("can copy sensitive yaml config files", func(t *testing.T) {
tmrDir := t.TempDir()
cwd, err := os.Getwd()
require.Nil(t, err)
configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "yaml"))
require.Empty(t, errs)
i := 0
var checkedFiles []string
for {
values, ok, err := configFrame.Next()
require.Nil(t, err)
if !ok {
break
}
configFile := values[0].(data.YamlConfigFile)
fileName := filepath.Base(configFile.FilePath())
newPath := path.Join(tmpDir, fileName)
err = configFile.Copy(newPath, true)
require.Nil(t, err)
require.FileExists(t, newPath)
bytes, err := ioutil.ReadFile(newPath)
require.Nil(t, err)
s := string(bytes)
checkedFiles = append(checkedFiles, fileName)
if fileName == "users.yaml" || fileName == "default-password.yaml" || fileName == "user-include.yaml" {
require.True(t, strings.Contains(s, "password: 'Replaced'") ||
strings.Contains(s, "password_sha256_hex: 'Replaced'"))
require.NotContains(t, s, "password: 'REPLACE_ME'")
require.NotContains(t, s, "password_sha256_hex: \"REPLACE_ME\"")
} else if fileName == "config.yaml" {
require.True(t, strings.Contains(s, "access_key_id: 'Replaced'"))
require.True(t, strings.Contains(s, "secret_access_key: 'Replaced'"))
require.True(t, strings.Contains(s, "secret: 'Replaced'"))
require.NotContains(t, s, "access_key_id: 'REPLACE_ME'")
require.NotContains(t, s, "secret_access_key: REPLACE_ME")
require.NotContains(t, s, "secret: REPLACE_ME")
}
i++
}
require.ElementsMatch(t, []string{"users.yaml", "default-password.yaml", "user-include.yaml", "config.yaml", "server-include.yaml"}, checkedFiles)
require.Equal(t, 5, i)
})
}
func TestConfigFileFrameFindLogPaths(t *testing.T) {
t.Run("can find xml log paths", func(t *testing.T) {
cwd, err := os.Getwd()
require.Nil(t, err)
configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "xml"))
require.Empty(t, errs)
paths, errs := configFrame.FindLogPaths()
require.Empty(t, errs)
require.ElementsMatch(t, []string{"/var/log/clickhouse-server/clickhouse-server.log",
"/var/log/clickhouse-server/clickhouse-server.err.log"}, paths)
})
t.Run("can handle empty log paths", func(t *testing.T) {
configFrame, errs := data.NewConfigFileFrame(t.TempDir())
require.Empty(t, errs)
paths, errs := configFrame.FindLogPaths()
require.Empty(t, errs)
require.Empty(t, paths)
})
t.Run("can find yaml log paths", func(t *testing.T) {
cwd, err := os.Getwd()
require.Nil(t, err)
configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "yaml"))
require.Empty(t, errs)
paths, errs := configFrame.FindLogPaths()
require.Empty(t, errs)
require.ElementsMatch(t, []string{"/var/log/clickhouse-server/clickhouse-server.log",
"/var/log/clickhouse-server/clickhouse-server.err.log"}, paths)
})
}
// test the legacy format for ClickHouse xml config files with a yandex root tag
func TestYandexConfigFile(t *testing.T) {
t.Run("can find xml log paths with yandex root", func(t *testing.T) {
cwd, err := os.Getwd()
require.Nil(t, err)
configFrame, errs := data.NewConfigFileFrame(path.Join(cwd, "../../../testdata", "configs", "yandex_xml"))
require.Empty(t, errs)
paths, errs := configFrame.FindLogPaths()
require.Empty(t, errs)
require.ElementsMatch(t, []string{"/var/log/clickhouse-server/clickhouse-server.log",
"/var/log/clickhouse-server/clickhouse-server.err.log"}, paths)
})
}

View File

@ -1,11 +0,0 @@
package data
type BaseFrame struct {
Name string
}
type Frame interface {
Next() ([]interface{}, bool, error)
Columns() []string
Name() string
}
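Every frame in the tool is consumed through this one interface, so a single drain loop works for config files, query results, and in-memory data alike. A minimal sketch of a generic consumer (collectRows is illustrative, not part of the package):

// collectRows drains any Frame into a slice of rows.
// It stops at the first error or at end of data.
func collectRows(f Frame) ([][]interface{}, error) {
	var rows [][]interface{}
	for {
		values, ok, err := f.Next()
		if err != nil {
			return rows, err
		}
		if !ok {
			return rows, nil
		}
		rows = append(rows, values)
	}
}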

View File

@ -1,35 +0,0 @@
package data
type MemoryFrame struct {
i *int
ColumnNames []string
Rows [][]interface{}
name string
}
func NewMemoryFrame(name string, columns []string, rows [][]interface{}) MemoryFrame {
i := 0
return MemoryFrame{
i: &i,
Rows: rows,
ColumnNames: columns,
name: name,
}
}
func (f MemoryFrame) Next() ([]interface{}, bool, error) {
if f.Rows == nil || len(f.Rows) == *(f.i) {
return nil, false, nil
}
value := f.Rows[*f.i]
*f.i++
return value, true, nil
}
func (f MemoryFrame) Columns() []string {
return f.ColumnNames
}
func (f MemoryFrame) Name() string {
return f.name
}

View File

@ -1,61 +0,0 @@
package data_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/stretchr/testify/require"
)
func TestNextMemoryFrame(t *testing.T) {
t.Run("can iterate memory frame", func(t *testing.T) {
columns := []string{"Filesystem", "Size", "Used", "Avail", "Use%", "Mounted on"}
rows := [][]interface{}{
{"sysfs", 0, 0, 0, 0, "/sys"},
{"proc", 0, 0, 0, 0, "/proc"},
{"udev", 33357840384, 0, 33357840384, 0, "/dev"},
{"devpts", 0, 0, 0, 0, "/dev/pts"},
{"tmpfs", 6682607616, 2228224, 6680379392, 1, "/run"},
{"/dev/mapper/system-root", 1938213220352, 118136926208, 1721548947456, 7.000000000000001, "/"},
}
memoryFrame := data.NewMemoryFrame("disks", columns, rows)
i := 0
for {
values, ok, err := memoryFrame.Next()
require.Nil(t, err)
if !ok {
break
}
require.ElementsMatch(t, values, rows[i])
require.Len(t, values, 6)
i += 1
}
require.Equal(t, 6, i)
})
t.Run("can iterate memory frame when empty", func(t *testing.T) {
memoryFrame := data.NewMemoryFrame("test", []string{}, [][]interface{}{})
i := 0
for {
_, ok, err := memoryFrame.Next()
require.Nil(t, err)
if !ok {
break
}
i += 1
}
require.Equal(t, 0, i)
})
t.Run("can iterate memory frame when empty", func(t *testing.T) {
memoryFrame := data.MemoryFrame{}
i := 0
for {
_, ok, err := memoryFrame.Next()
require.Nil(t, err)
if !ok {
break
}
i += 1
}
require.Equal(t, 0, i)
})
}

View File

@ -1,27 +0,0 @@
package data
func NewHierarchicalFrame(name string, frame Frame, subFrames []HierarchicalFrame) HierarchicalFrame {
return HierarchicalFrame{
name: name,
DataFrame: frame,
SubFrames: subFrames,
}
}
type HierarchicalFrame struct {
name string
DataFrame Frame
SubFrames []HierarchicalFrame
}
func (hf HierarchicalFrame) Name() string {
return hf.name
}
func (hf HierarchicalFrame) Columns() []string {
return hf.DataFrame.Columns()
}
func (hf HierarchicalFrame) Next() ([]interface{}, bool, error) {
return hf.DataFrame.Next()
}
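Because each node satisfies Frame by delegating to its DataFrame, a depth-first walk visits every frame in the tree. A sketch (walkFrames is a hypothetical helper, not part of the package):

// walkFrames applies fn to hf and then to all of its
// descendants, depth-first.
func walkFrames(hf HierarchicalFrame, fn func(HierarchicalFrame)) {
	fn(hf)
	for _, sub := range hf.SubFrames {
		walkFrames(sub, fn)
	}
}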

View File

@ -1,95 +0,0 @@
package database
import (
"database/sql"
"fmt"
"net/url"
"strings"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
_ "github.com/ClickHouse/clickhouse-go/v2"
"github.com/pkg/errors"
)
type ClickhouseNativeClient struct {
host string
connection *sql.DB
}
func NewNativeClient(host string, port uint16, username string, password string) (*ClickhouseNativeClient, error) {
// append ?debug=true to the DSN to enable client debug output
connection, err := sql.Open("clickhouse", fmt.Sprintf("clickhouse://%s:%s@%s:%d/", url.QueryEscape(username), url.QueryEscape(password), host, port))
if err != nil {
return &ClickhouseNativeClient{}, err
}
if err := connection.Ping(); err != nil {
return &ClickhouseNativeClient{}, err
}
return &ClickhouseNativeClient{
host: host,
connection: connection,
}, nil
}
func (c *ClickhouseNativeClient) Ping() error {
return c.connection.Ping()
}
func (c *ClickhouseNativeClient) ReadTable(databaseName string, tableName string, excludeColumns []string, orderBy data.OrderBy, limit int64) (data.Frame, error) {
exceptClause := ""
if len(excludeColumns) > 0 {
exceptClause = fmt.Sprintf("EXCEPT(%s) ", strings.Join(excludeColumns, ","))
}
limitClause := ""
if limit >= 0 {
limitClause = fmt.Sprintf(" LIMIT %d", limit)
}
rows, err := c.connection.Query(fmt.Sprintf("SELECT * %sFROM %s.%s%s%s", exceptClause, databaseName, tableName, orderBy.String(), limitClause))
if err != nil {
return data.DatabaseFrame{}, err
}
return data.NewDatabaseFrame(fmt.Sprintf("%s.%s", databaseName, tableName), rows)
}
func (c *ClickhouseNativeClient) ReadTableNamesForDatabase(databaseName string) ([]string, error) {
rows, err := c.connection.Query(fmt.Sprintf("SHOW TABLES FROM %s", databaseName))
if err != nil {
return nil, err
}
defer rows.Close()
var tableNames []string
var name string
for rows.Next() {
if err := rows.Scan(&name); err != nil {
return nil, err
}
tableNames = append(tableNames, name)
}
return tableNames, nil
}
func (c *ClickhouseNativeClient) ExecuteStatement(id string, statement string) (data.Frame, error) {
rows, err := c.connection.Query(statement)
if err != nil {
return data.DatabaseFrame{}, err
}
return data.NewDatabaseFrame(id, rows)
}
func (c *ClickhouseNativeClient) Version() (string, error) {
frame, err := c.ExecuteStatement("version", "SELECT version() as version")
if err != nil {
return "", err
}
values, ok, err := frame.Next()
if err != nil {
return "", err
}
if !ok {
return "", errors.New("unable to read ClickHouse version")
}
if len(values) != 1 {
return "", errors.New("unable to read ClickHouse version - no rows returned")
}
return values[0].(string), nil
}
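Tying the client together, a minimal usage sketch (host, port and credentials are placeholders; assumes a locally running server):

package main

import (
	"fmt"
	"log"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/database"
)

func main() {
	client, err := database.NewNativeClient("localhost", 9000, "default", "")
	if err != nil {
		log.Fatalf("unable to connect: %v", err)
	}
	version, err := client.Version()
	if err != nil {
		log.Fatalf("unable to read version: %v", err)
	}
	fmt.Println("ClickHouse version:", version)
}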

View File

@ -1,289 +0,0 @@
//go:build !no_docker
package database_test
import (
"context"
"fmt"
"os"
"path"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/database"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/docker/go-connections/nat"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
)
func createClickHouseContainer(t *testing.T, ctx context.Context) (testcontainers.Container, nat.Port) {
// create a ClickHouse container
cwd, err := os.Getwd()
if err != nil {
// can't test without current directory
panic(err)
}
// the test server version defaults to "latest"; override it via the CLICKHOUSE_VERSION env var
req := testcontainers.ContainerRequest{
Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()),
ExposedPorts: []string{"9000/tcp"},
WaitingFor: wait.ForLog("Ready for connections"),
Mounts: testcontainers.ContainerMounts{
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../../../testdata/docker/custom.xml"),
},
Target: "/etc/clickhouse-server/config.d/custom.xml",
},
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../../../testdata/docker/admin.xml"),
},
Target: "/etc/clickhouse-server/users.d/admin.xml",
},
},
}
clickhouseContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
if err != nil {
// can't test without container
panic(err)
}
p, _ := clickhouseContainer.MappedPort(ctx, "9000")
if err != nil {
// can't test without container's port
panic(err)
}
t.Setenv("CLICKHOUSE_DB_PORT", p.Port())
return clickhouseContainer, p
}
func getClient(t *testing.T, mappedPort int) *database.ClickhouseNativeClient {
clickhouseClient, err := database.NewNativeClient("localhost", uint16(mappedPort), "", "")
if err != nil {
t.Fatalf("unable to build client : %v", err)
}
return clickhouseClient
}
func TestReadTableNamesForDatabase(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint
clickhouseClient := getClient(t, mappedPort.Int())
t.Run("client can read tables for a database", func(t *testing.T) {
tables, err := clickhouseClient.ReadTableNamesForDatabase("system")
require.Nil(t, err)
require.GreaterOrEqual(t, len(tables), 70)
require.Contains(t, tables, "merge_tree_settings")
})
}
func TestReadTable(t *testing.T) {
t.Run("client can get all rows for system.disks table", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint
clickhouseClient := getClient(t, mappedPort.Int())
// we read the table system.disks as this should contain only 1 row
frame, err := clickhouseClient.ReadTable("system", "disks", []string{}, data.OrderBy{}, 10)
require.Nil(t, err)
require.ElementsMatch(t, frame.Columns(), [9]string{"name", "path", "free_space", "total_space", "unreserved_space", "keep_free_space", "type", "is_encrypted", "cache_path"})
i := 0
for {
values, ok, err := frame.Next()
if i == 0 {
require.Nil(t, err)
require.True(t, ok)
require.Equal(t, "default", values[0])
require.Equal(t, "/var/lib/clickhouse/", values[1])
require.Greater(t, values[2], uint64(0))
require.Greater(t, values[3], uint64(0))
require.Greater(t, values[4], uint64(0))
require.Equal(t, values[5], uint64(0))
require.Equal(t, "local", values[6])
require.Equal(t, values[7], uint8(0))
require.Equal(t, values[8], "")
} else {
require.False(t, ok)
break
}
i += 1
}
})
t.Run("client can get all rows for system.databases table", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint
clickhouseClient := getClient(t, mappedPort.Int())
// we read the table system.databases as this should be small and consistent on fresh db instances
frame, err := clickhouseClient.ReadTable("system", "databases", []string{}, data.OrderBy{}, 10)
require.Nil(t, err)
require.ElementsMatch(t, frame.Columns(), [6]string{"name", "engine", "data_path", "metadata_path", "uuid", "comment"})
expectedRows := [4][3]string{{"INFORMATION_SCHEMA", "Memory", "/var/lib/clickhouse/"},
{"default", "Atomic", "/var/lib/clickhouse/store/"},
{"information_schema", "Memory", "/var/lib/clickhouse/"},
{"system", "Atomic", "/var/lib/clickhouse/store/"}}
i := 0
for {
values, ok, err := frame.Next()
if i < 4 {
require.Nil(t, err)
require.True(t, ok)
require.Equal(t, expectedRows[i][0], values[0])
require.Equal(t, expectedRows[i][1], values[1])
require.Equal(t, expectedRows[i][2], values[2])
require.NotNil(t, values[3])
require.NotNil(t, values[4])
require.Equal(t, "", values[5])
} else {
require.False(t, ok)
break
}
i += 1
}
})
t.Run("client can get all rows for system.databases table with except", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint
clickhouseClient := getClient(t, mappedPort.Int())
frame, err := clickhouseClient.ReadTable("system", "databases", []string{"data_path", "comment"}, data.OrderBy{}, 10)
require.Nil(t, err)
require.ElementsMatch(t, frame.Columns(), [4]string{"name", "engine", "metadata_path", "uuid"})
})
t.Run("client can limit rows for system.databases", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint
clickhouseClient := getClient(t, mappedPort.Int())
frame, err := clickhouseClient.ReadTable("system", "databases", []string{}, data.OrderBy{}, 1)
require.Nil(t, err)
require.ElementsMatch(t, frame.Columns(), [6]string{"name", "engine", "data_path", "metadata_path", "uuid", "comment"})
expectedRows := [1][3]string{{"INFORMATION_SCHEMA", "Memory", "/var/lib/clickhouse/"}}
i := 0
for {
values, ok, err := frame.Next()
if i == 0 {
require.Nil(t, err)
require.True(t, ok)
require.Equal(t, expectedRows[i][0], values[0])
require.Equal(t, expectedRows[i][1], values[1])
require.Equal(t, expectedRows[i][2], values[2])
require.NotNil(t, values[3])
require.NotNil(t, values[4])
require.Equal(t, "", values[5])
} else {
require.False(t, ok)
break
}
i += 1
}
})
t.Run("client can order rows for system.databases", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint
clickhouseClient := getClient(t, mappedPort.Int())
frame, err := clickhouseClient.ReadTable("system", "databases", []string{}, data.OrderBy{
Column: "engine",
Order: data.Asc,
}, 10)
require.Nil(t, err)
require.ElementsMatch(t, frame.Columns(), [6]string{"name", "engine", "data_path", "metadata_path", "uuid", "comment"})
expectedRows := [4][3]string{
{"default", "Atomic", "/var/lib/clickhouse/store/"},
{"system", "Atomic", "/var/lib/clickhouse/store/"},
{"INFORMATION_SCHEMA", "Memory", "/var/lib/clickhouse/"},
{"information_schema", "Memory", "/var/lib/clickhouse/"},
}
i := 0
for {
values, ok, err := frame.Next()
if i < 4 {
require.Nil(t, err)
require.True(t, ok)
require.Equal(t, expectedRows[i][0], values[0])
require.Equal(t, expectedRows[i][1], values[1])
require.Equal(t, expectedRows[i][2], values[2])
require.NotNil(t, values[3])
require.NotNil(t, values[4])
require.Equal(t, "", values[5])
} else {
require.False(t, ok)
break
}
i += 1
}
})
}
func TestExecuteStatement(t *testing.T) {
t.Run("client can execute any statement", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint
clickhouseClient := getClient(t, mappedPort.Int())
statement := "SELECT path, count(*) as count FROM system.disks GROUP BY path;"
frame, err := clickhouseClient.ExecuteStatement("engines", statement)
require.Nil(t, err)
require.ElementsMatch(t, frame.Columns(), [2]string{"path", "count"})
expectedRows := [1][2]interface{}{
{"/var/lib/clickhouse/", uint64(1)},
}
i := 0
for {
values, ok, err := frame.Next()
if !ok {
require.Nil(t, err)
break
}
require.Nil(t, err)
require.Equal(t, expectedRows[i][0], values[0])
require.Equal(t, expectedRows[i][1], values[1])
i++
}
fmt.Println(i)
})
}
func TestVersion(t *testing.T) {
t.Run("client can read version", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint
clickhouseClient := getClient(t, mappedPort.Int())
version, err := clickhouseClient.Version()
require.Nil(t, err)
require.NotEmpty(t, version)
})
}

View File

@ -1,49 +0,0 @@
package platform
import (
"errors"
"sync"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/database"
)
var once sync.Once
var dbInit sync.Once
// ResourceManager manages all resources that collectors and outputs may need, including db connections
type DBClient interface {
ReadTableNamesForDatabase(databaseName string) ([]string, error)
ReadTable(databaseName string, tableName string, excludeColumns []string, orderBy data.OrderBy, limit int64) (data.Frame, error)
ExecuteStatement(id string, statement string) (data.Frame, error)
Version() (string, error)
}
var manager *ResourceManager
type ResourceManager struct {
DbClient DBClient
}
func GetResourceManager() *ResourceManager {
once.Do(func() {
manager = &ResourceManager{}
})
return manager
}
func (m *ResourceManager) Connect(host string, port uint16, username string, password string) error {
var err error
var clientInstance DBClient
initialized := false
dbInit.Do(func() {
clientInstance, err = database.NewNativeClient(host, port, username, password)
manager.DbClient = clientInstance
initialized = true
})
if !initialized {
return errors.New("connect can only be called once")
}
return err
}
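The manager is a process-wide singleton whose Connect may succeed only once; a sketch of the intended call pattern (host, port and credentials are placeholders):

package main

import (
	"fmt"
	"log"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
)

func main() {
	manager := platform.GetResourceManager()
	// Connect may only succeed once per process; any later
	// call returns "connect can only be called once".
	if err := manager.Connect("localhost", 9000, "default", ""); err != nil {
		log.Fatal(err)
	}
	version, err := manager.DbClient.Version()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(version)
}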

View File

@ -1,100 +0,0 @@
//go:build !no_docker
package platform_test
import (
"context"
"fmt"
"os"
"path"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/docker/go-connections/nat"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
)
// create a ClickHouse container
func createClickHouseContainer(t *testing.T, ctx context.Context) (testcontainers.Container, nat.Port) {
cwd, err := os.Getwd()
if err != nil {
fmt.Println("unable to read current directory", err)
os.Exit(1)
}
// the test server version defaults to "latest"; override it via the CLICKHOUSE_VERSION env var
req := testcontainers.ContainerRequest{
Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()),
ExposedPorts: []string{"9000/tcp"},
WaitingFor: wait.ForLog("Ready for connections"),
Mounts: testcontainers.ContainerMounts{
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../../testdata/docker/custom.xml"),
},
Target: "/etc/clickhouse-server/config.d/custom.xml",
},
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../../testdata/docker/admin.xml"),
},
Target: "/etc/clickhouse-server/users.d/admin.xml",
},
},
}
clickhouseContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
if err != nil {
// can't test without container
panic(err)
}
p, err := clickhouseContainer.MappedPort(ctx, "9000")
if err != nil {
// can't test without a port
panic(err)
}
return clickhouseContainer, p
}
func TestConnect(t *testing.T) {
t.Run("can only connect once", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint
t.Setenv("CLICKHOUSE_DB_PORT", mappedPort.Port())
port := mappedPort.Int()
// get before connection
manager := platform.GetResourceManager()
require.Nil(t, manager.DbClient)
// init connection
err := manager.Connect("localhost", uint16(port), "", "")
require.Nil(t, err)
require.NotNil(t, manager.DbClient)
// try and re-fetch connection
err = manager.Connect("localhost", uint16(port), "", "")
require.NotNil(t, err)
require.Equal(t, "connect can only be called once", err.Error())
})
}
func TestGetResourceManager(t *testing.T) {
t.Run("get resource manager", func(t *testing.T) {
manager := platform.GetResourceManager()
require.NotNil(t, manager)
manager2 := platform.GetResourceManager()
require.NotNil(t, manager2)
require.Equal(t, &manager, &manager2)
})
}

View File

@ -1,166 +0,0 @@
package test
import (
"fmt"
"sort"
"strings"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/pkg/errors"
)
type fakeClickhouseClient struct {
tables map[string][]string
QueryResponses map[string]*FakeDataFrame
}
func NewFakeClickhouseClient(tables map[string][]string) fakeClickhouseClient {
queryResponses := make(map[string]*FakeDataFrame)
return fakeClickhouseClient{
tables: tables,
QueryResponses: queryResponses,
}
}
func (f fakeClickhouseClient) ReadTableNamesForDatabase(databaseName string) ([]string, error) {
if _, ok := f.tables[databaseName]; ok {
return f.tables[databaseName], nil
}
return nil, fmt.Errorf("database %s does not exist", databaseName)
}
func (f fakeClickhouseClient) ReadTable(databaseName string, tableName string, excludeColumns []string, orderBy data.OrderBy, limit int64) (data.Frame, error) {
exceptClause := ""
if len(excludeColumns) > 0 {
exceptClause = fmt.Sprintf("EXCEPT(%s) ", strings.Join(excludeColumns, ","))
}
limitClause := ""
if limit >= 0 {
limitClause = fmt.Sprintf(" LIMIT %d", limit)
}
query := fmt.Sprintf("SELECT * %sFROM %s.%s%s%s", exceptClause, databaseName, tableName, orderBy.String(), limitClause)
frame, err := f.ExecuteStatement(fmt.Sprintf("read_table_%s.%s", databaseName, tableName), query)
if err != nil {
return frame, err
}
fFrame := *(frame.(*FakeDataFrame))
fFrame = fFrame.FilterColumns(excludeColumns)
fFrame = fFrame.Order(orderBy)
fFrame = fFrame.Limit(limit)
return fFrame, nil
}
func (f fakeClickhouseClient) ExecuteStatement(id string, statement string) (data.Frame, error) {
if frame, ok := f.QueryResponses[statement]; ok {
return frame, nil
}
return FakeDataFrame{}, errors.Errorf("no recorded response for %s", statement)
}
func (f fakeClickhouseClient) Version() (string, error) {
return "21.12.3", nil
}
func (f fakeClickhouseClient) Reset() {
for key, frame := range f.QueryResponses {
frame.Reset()
f.QueryResponses[key] = frame
}
}
type FakeDataFrame struct {
i *int
Rows [][]interface{}
ColumnNames []string
name string
}
func NewFakeDataFrame(name string, columns []string, rows [][]interface{}) FakeDataFrame {
i := 0
return FakeDataFrame{
i: &i,
Rows: rows,
ColumnNames: columns,
name: name,
}
}
func (f FakeDataFrame) Next() ([]interface{}, bool, error) {
if f.Rows == nil || len(f.Rows) == *(f.i) {
return nil, false, nil
}
value := f.Rows[*f.i]
*f.i++
return value, true, nil
}
func (f FakeDataFrame) Columns() []string {
return f.ColumnNames
}
func (f FakeDataFrame) Name() string {
return f.name
}
func (f *FakeDataFrame) Reset() {
i := 0
f.i = &i
}
func (f FakeDataFrame) FilterColumns(excludeColumns []string) FakeDataFrame {
// get columns we can remove
rColumns := utils.Intersection(f.ColumnNames, excludeColumns)
rIndexes := make([]int, len(rColumns))
// find the indexes of the columns to remove
for i, column := range rColumns {
rIndexes[i] = utils.IndexOf(f.ColumnNames, column)
}
newRows := make([][]interface{}, len(f.Rows))
for r, row := range f.Rows {
newRow := row
for i, index := range rIndexes {
newRow = utils.Remove(newRow, index-i)
}
newRows[r] = newRow
}
f.Rows = newRows
f.ColumnNames = utils.Distinct(f.ColumnNames, excludeColumns)
return f
}
func (f FakeDataFrame) Limit(rowLimit int64) FakeDataFrame {
if rowLimit >= 0 {
if int64(len(f.Rows)) > rowLimit {
f.Rows = f.Rows[:rowLimit]
}
}
return f
}
func (f FakeDataFrame) Order(orderBy data.OrderBy) FakeDataFrame {
if orderBy.Column == "" {
return f
}
cIndex := utils.IndexOf(f.ColumnNames, orderBy.Column)
sort.Slice(f.Rows, func(i, j int) bool {
left := f.Rows[i][cIndex]
right := f.Rows[j][cIndex]
if iLeft, ok := left.(int); ok {
if orderBy.Order == data.Asc {
return iLeft < right.(int)
}
return iLeft > right.(int)
} else {
// we aren't a full db - revert to string order
sLeft := left.(string)
sRight := right.(string)
if orderBy.Order == data.Asc {
return sLeft < sRight
}
return sLeft > sRight
}
})
return f
}
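FilterColumns, Order and Limit each return a modified copy, so they chain in the same order ReadTable applies them; a sketch (exampleFakeQuery is a hypothetical helper, not part of the package):

// exampleFakeQuery shows how the fake emulates
// SELECT * EXCEPT(free_space) FROM disks ORDER BY name LIMIT 1.
func exampleFakeQuery() data.Frame {
	frame := NewFakeDataFrame("disks",
		[]string{"name", "path", "free_space"},
		[][]interface{}{
			{"default", "/var/lib/clickhouse/", 1024},
			{"backup", "/mnt/backup/", 2048},
		})
	// the single resulting row is {"backup", "/mnt/backup/"}
	return frame.FilterColumns([]string{"free_space"}).
		Order(data.OrderBy{Column: "name", Order: data.Asc}).
		Limit(1)
}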

View File

@ -1,16 +0,0 @@
package test
import "os"
const defaultClickHouseVersion = "latest"
func GetClickHouseTestVersion() string {
return GetEnv("CLICKHOUSE_VERSION", defaultClickHouseVersion)
}
func GetEnv(key, fallback string) string {
if value, ok := os.LookupEnv(key); ok {
return value
}
return fallback
}

View File

@ -1,95 +0,0 @@
package utils
import (
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"github.com/pkg/errors"
)
func FileExists(name string) (bool, error) {
f, err := os.Stat(name)
if err == nil {
if !f.IsDir() {
return true, nil
}
return false, fmt.Errorf("%s is a directory", name)
}
if errors.Is(err, os.ErrNotExist) {
return false, nil
}
return false, err
}
func DirExists(name string) (bool, error) {
f, err := os.Stat(name)
if err == nil {
if f.IsDir() {
return true, nil
}
return false, fmt.Errorf("%s is a file", name)
}
if errors.Is(err, os.ErrNotExist) {
return false, nil
}
return false, err
}
func CopyFile(sourceFilename string, destFilename string) error {
exists, err := FileExists(sourceFilename)
if err != nil {
return err
}
if !exists {
return fmt.Errorf("%s does not exist", sourceFilename)
}
source, err := os.Open(sourceFilename)
if err != nil {
return err
}
defer source.Close()
destDir := filepath.Dir(destFilename)
if err := os.MkdirAll(destDir, os.ModePerm); err != nil {
return errors.Wrapf(err, "unable to create directory %s", destDir)
}
destination, err := os.Create(destFilename)
if err != nil {
return err
}
defer destination.Close()
_, err = io.Copy(destination, source)
return err
}
// patterns are OR-ed: a file is listed if it matches any one of them
func ListFilesInDirectory(directory string, patterns []string) ([]string, []error) {
var files []string
exists, err := DirExists(directory)
if err != nil {
return files, []error{err}
}
if !exists {
return files, []error{fmt.Errorf("directory %s does not exist", directory)}
}
var pathErrors []error
_ = filepath.Walk(directory, func(path string, info fs.FileInfo, err error) error {
if err != nil {
pathErrors = append(pathErrors, err)
} else if !info.IsDir() {
for _, pattern := range patterns {
if matched, err := filepath.Match(pattern, filepath.Base(path)); err != nil {
pathErrors = append(pathErrors, err)
} else if matched {
files = append(files, path)
}
}
}
return nil
})
return files, pathErrors
}

View File

@ -1,134 +0,0 @@
package utils_test
import (
"fmt"
"os"
"path"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/stretchr/testify/require"
)
func TestFileExists(t *testing.T) {
t.Run("returns true for file", func(t *testing.T) {
tempDir := t.TempDir()
filepath := path.Join(tempDir, "random.txt")
_, err := os.Create(filepath)
require.Nil(t, err)
exists, err := utils.FileExists(filepath)
require.True(t, exists)
require.Nil(t, err)
})
t.Run("doesn't return true for not existence file", func(t *testing.T) {
tempDir := t.TempDir()
file := path.Join(tempDir, "random.txt")
exists, err := utils.FileExists(file)
require.False(t, exists)
require.Nil(t, err)
})
t.Run("doesn't return true for directory", func(t *testing.T) {
tempDir := t.TempDir()
exists, err := utils.FileExists(tempDir)
require.False(t, exists)
require.NotNil(t, err)
require.Equal(t, fmt.Sprintf("%s is a directory", tempDir), err.Error())
})
}
func TestDirExists(t *testing.T) {
t.Run("doesn't return true for file", func(t *testing.T) {
tempDir := t.TempDir()
filepath := path.Join(tempDir, "random.txt")
_, err := os.Create(filepath)
require.Nil(t, err)
exists, err := utils.DirExists(filepath)
require.False(t, exists)
require.NotNil(t, err)
require.Equal(t, fmt.Sprintf("%s is a file", filepath), err.Error())
})
t.Run("returns true for directory", func(t *testing.T) {
tempDir := t.TempDir()
exists, err := utils.DirExists(tempDir)
require.True(t, exists)
require.Nil(t, err)
})
t.Run("doesn't return true random directory", func(t *testing.T) {
exists, err := utils.FileExists(fmt.Sprintf("%d", utils.MakeTimestamp()))
require.False(t, exists)
require.Nil(t, err)
})
}
func TestCopyFile(t *testing.T) {
t.Run("can copy file", func(t *testing.T) {
tempDir := t.TempDir()
sourcePath := path.Join(tempDir, "random.txt")
_, err := os.Create(sourcePath)
require.Nil(t, err)
destPath := path.Join(tempDir, "random-2.txt")
err = utils.CopyFile(sourcePath, destPath)
require.Nil(t, err)
})
t.Run("can copy nested file", func(t *testing.T) {
tempDir := t.TempDir()
sourcePath := path.Join(tempDir, "random.txt")
_, err := os.Create(sourcePath)
require.Nil(t, err)
destPath := path.Join(tempDir, "sub_dir", "random-2.txt")
err = utils.CopyFile(sourcePath, destPath)
require.Nil(t, err)
})
t.Run("fails when file does not exist", func(t *testing.T) {
tempDir := t.TempDir()
sourcePath := path.Join(tempDir, "random.txt")
destPath := path.Join(tempDir, "random-2.txt")
err := utils.CopyFile(sourcePath, destPath)
require.NotNil(t, err)
require.Equal(t, fmt.Sprintf("%s does not exist", sourcePath), err.Error())
})
}
func TestListFilesInDirectory(t *testing.T) {
tempDir := t.TempDir()
files := make([]string, 5)
for i := 0; i < 5; i++ {
fileDir := path.Join(tempDir, fmt.Sprintf("%d", i))
err := os.MkdirAll(fileDir, os.ModePerm)
require.Nil(t, err)
ext := ".txt"
if i%2 == 0 {
ext = ".csv"
}
filepath := path.Join(fileDir, fmt.Sprintf("random-%d%s", i, ext))
files[i] = filepath
_, err = os.Create(filepath)
require.Nil(t, err)
}
t.Run("can list all files", func(t *testing.T) {
mFiles, errs := utils.ListFilesInDirectory(tempDir, []string{"*"})
require.Len(t, mFiles, 5)
require.Empty(t, errs)
})
t.Run("can list by extension", func(t *testing.T) {
mFiles, errs := utils.ListFilesInDirectory(tempDir, []string{"*.csv"})
require.Len(t, mFiles, 3)
require.Empty(t, errs)
require.ElementsMatch(t, []string{files[0], files[2], files[4]}, mFiles)
})
t.Run("can list on multiple extensions files", func(t *testing.T) {
mFiles, errs := utils.ListFilesInDirectory(tempDir, []string{"*.csv", "*.txt"})
require.Len(t, mFiles, 5)
require.Empty(t, errs)
})
}

View File

@ -1,49 +0,0 @@
package utils
import (
"github.com/elastic/gosigar"
"strings"
)
func FindClickHouseProcesses() ([]gosigar.ProcArgs, error) {
pids := gosigar.ProcList{}
err := pids.Get()
if err != nil {
return nil, err
}
var clickhousePs []gosigar.ProcArgs
for _, pid := range pids.List {
args := gosigar.ProcArgs{}
if err := args.Get(pid); err != nil {
continue
}
if len(args.List) > 0 {
if strings.Contains(args.List[0], "clickhouse-server") {
clickhousePs = append(clickhousePs, args)
}
}
}
return clickhousePs, nil
}
func FindConfigsFromClickHouseProcesses() ([]string, error) {
clickhouseProcesses, err := FindClickHouseProcesses()
if err != nil {
return nil, err
}
var configs []string
if len(clickhouseProcesses) > 0 {
// we have candidate matches
for _, ps := range clickhouseProcesses {
for _, arg := range ps.List {
if strings.Contains(arg, "--config") {
configFile := strings.ReplaceAll(arg, "--config-file=", "")
// containers receive config with --config
configFile = strings.ReplaceAll(configFile, "--config=", "")
configs = append(configs, configFile)
}
}
}
}
return configs, err
}
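A usage sketch for host-level collection (output lines are only indicative):

package main

import (
	"fmt"
	"log"

	"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
)

func main() {
	// Print the config file of every running clickhouse-server process.
	configs, err := utils.FindConfigsFromClickHouseProcesses()
	if err != nil {
		log.Fatal(err)
	}
	for _, config := range configs {
		fmt.Println("found config:", config)
	}
}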

View File

@ -1,97 +0,0 @@
//go:build !no_docker
package utils_test
import (
"context"
"fmt"
"io"
"os"
"path"
"strings"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
)
func getProcessesInContainer(t *testing.T, container testcontainers.Container) ([]string, error) {
result, reader, err := container.Exec(context.Background(), []string{"ps", "-aux"})
if err != nil {
return nil, err
}
require.Zero(t, result)
require.NotNil(t, reader)
b, err := io.ReadAll(reader)
if err != nil {
return nil, err
}
require.NotNil(t, b)
lines := strings.Split(string(b), "\n")
// discard PS header
return lines[1:], nil
}
func TestFindClickHouseProcessesAndConfigs(t *testing.T) {
t.Run("can find ClickHouse processes and configs", func(t *testing.T) {
// create a ClickHouse container
ctx := context.Background()
cwd, err := os.Getwd()
if err != nil {
fmt.Println("unable to read current directory", err)
os.Exit(1)
}
// run a ClickHouse container; it is terminated when the test ends
req := testcontainers.ContainerRequest{
Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()),
ExposedPorts: []string{"9000/tcp"},
WaitingFor: wait.ForLog("Ready for connections"),
Mounts: testcontainers.ContainerMounts{
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../../../testdata/docker/custom.xml"),
},
Target: "/etc/clickhouse-server/config.d/custom.xml",
},
},
}
clickhouseContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
if err != nil {
// can't test without container
panic(err)
}
p, _ := clickhouseContainer.MappedPort(ctx, "9000")
t.Setenv("CLICKHOUSE_DB_PORT", p.Port())
defer clickhouseContainer.Terminate(ctx) //nolint
lines, err := getProcessesInContainer(t, clickhouseContainer)
require.Nil(t, err)
require.NotEmpty(t, lines)
for _, line := range lines {
parts := strings.Fields(line)
if len(parts) < 11 {
continue
}
if !strings.Contains(parts[10], "clickhouse-server") {
continue
}
require.Equal(t, "/usr/bin/clickhouse-server", parts[10])
require.Equal(t, "--config-file=/etc/clickhouse-server/config.xml", parts[11])
}
})
}

View File

@ -1,68 +0,0 @@
package utils
// Intersection of elements in s1 and s2
func Intersection(s1, s2 []string) (inter []string) {
hash := make(map[string]bool)
for _, e := range s1 {
hash[e] = false
}
for _, e := range s2 {
// If elements present in the hashmap then append intersection list.
if val, ok := hash[e]; ok {
if !val {
// only add once
inter = append(inter, e)
hash[e] = true
}
}
}
return inter
}
// Distinct returns elements in s1, not in s2
func Distinct(s1, s2 []string) (distinct []string) {
hash := make(map[string]bool)
for _, e := range s2 {
hash[e] = true
}
for _, e := range s1 {
if _, ok := hash[e]; !ok {
distinct = append(distinct, e)
}
}
return distinct
}
// Unique returns the distinct elements of s1, preserving first-seen order
func Unique(s1 []string) (unique []string) {
hash := make(map[string]bool)
for _, e := range s1 {
if _, ok := hash[e]; !ok {
unique = append(unique, e)
}
hash[e] = true
}
return unique
}
func Contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
func IndexOf(s []string, e string) int {
for i, a := range s {
if a == e {
return i
}
}
return -1
}
func Remove(slice []interface{}, s int) []interface{} {
return append(slice[:s], slice[s+1:]...)
}

View File

@ -1,64 +0,0 @@
package utils_test
import (
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/stretchr/testify/require"
)
func TestIntersection(t *testing.T) {
t.Run("can perform intersection", func(t *testing.T) {
setA := []string{"A", "b", "C", "D", "E"}
setB := []string{"A", "B", "F", "C", "G"}
setC := utils.Intersection(setA, setB)
require.Len(t, setC, 2)
require.ElementsMatch(t, []string{"A", "C"}, setC)
})
}
func TestDistinct(t *testing.T) {
t.Run("can perform distinct", func(t *testing.T) {
setA := []string{"A", "b", "C", "D", "E"}
setB := []string{"A", "B", "F", "C", "G"}
setC := utils.Distinct(setA, setB)
require.Len(t, setC, 3)
require.ElementsMatch(t, []string{"b", "D", "E"}, setC)
})
t.Run("can perform distinct on empty", func(t *testing.T) {
setA := []string{"A", "b", "C", "D", "E"}
var setB []string
setC := utils.Distinct(setA, setB)
require.Len(t, setC, 5)
require.ElementsMatch(t, []string{"A", "b", "C", "D", "E"}, setC)
})
}
func TestContains(t *testing.T) {
t.Run("can perform contains", func(t *testing.T) {
setA := []string{"A", "b", "C", "D", "E"}
require.True(t, utils.Contains(setA, "A"))
require.True(t, utils.Contains(setA, "b"))
require.True(t, utils.Contains(setA, "C"))
require.True(t, utils.Contains(setA, "D"))
require.True(t, utils.Contains(setA, "E"))
require.False(t, utils.Contains(setA, "B"))
})
}
func TestUnique(t *testing.T) {
t.Run("can perform unique", func(t *testing.T) {
setA := []string{"A", "b", "D", "D", "E", "E", "A"}
setC := utils.Unique(setA)
require.Len(t, setC, 4)
require.ElementsMatch(t, []string{"A", "b", "D", "E"}, setC)
})
t.Run("can perform unique on empty", func(t *testing.T) {
var setA []string
setC := utils.Unique(setA)
require.Len(t, setC, 0)
})
}

View File

@ -1,7 +0,0 @@
package utils
import "time"
func MakeTimestamp() int64 {
return time.Now().UnixMilli()
}

View File

@ -1,115 +0,0 @@
package internal
import (
c "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
o "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/pkg/errors"
"github.com/rs/zerolog/log"
)
type runConfiguration struct {
id string
host string
port uint16
username string
password string
output string
collectors []string
collectorConfigs map[string]config.Configuration
outputConfig config.Configuration
}
func NewRunConfiguration(id string, host string, port uint16, username string, password string, output string, outputConfig config.Configuration,
collectors []string, collectorConfigs map[string]config.Configuration) *runConfiguration {
config := runConfiguration{
id: id,
host: host,
port: port,
username: username,
password: password,
collectors: collectors,
output: output,
collectorConfigs: collectorConfigs,
outputConfig: outputConfig,
}
return &config
}
func Capture(config *runConfiguration) {
bundles, err := collect(config)
if err != nil {
log.Fatal().Err(err).Msg("unable to perform collection")
}
log.Info().Msgf("collectors initialized")
if err = output(config, bundles); err != nil {
log.Fatal().Err(err).Msg("unable to create output")
}
log.Info().Msgf("bundle export complete")
}
func collect(config *runConfiguration) (map[string]*data.DiagnosticBundle, error) {
resourceManager := platform.GetResourceManager()
err := resourceManager.Connect(config.host, config.port, config.username, config.password)
if err != nil {
// if we can't connect this is fatal
log.Fatal().Err(err).Msg("Unable to connect to database")
}
// grab the required collectors - we pass what we can
bundles := make(map[string]*data.DiagnosticBundle)
log.Info().Msgf("connection established")
// these store our collection errors and will be output in the bundle
var collectorErrors [][]interface{}
for _, collectorName := range config.collectors {
collectorConfig := config.collectorConfigs[collectorName]
log.Info().Msgf("initializing %s collector", collectorName)
collector, err := c.GetCollectorByName(collectorName)
if err != nil {
log.Error().Err(err).Msgf("Unable to fetch collector %s", collectorName)
collectorErrors = append(collectorErrors, []interface{}{err.Error()})
continue
}
bundle, err := collector.Collect(collectorConfig)
if err != nil {
log.Error().Err(err).Msgf("Error in collector %s", collectorName)
collectorErrors = append(collectorErrors, []interface{}{err.Error()})
// this indicates a fatal error in the collector
continue
}
for _, fError := range bundle.Errors.Errors {
err = errors.Wrapf(fError, "Failure to collect frame in collector %s", collectorName)
collectorErrors = append(collectorErrors, []interface{}{err.Error()})
log.Warn().Msg(err.Error())
}
bundles[collectorName] = bundle
}
bundles["diag_trace"] = buildTraceBundle(collectorErrors)
return bundles, nil
}
func output(config *runConfiguration, bundles map[string]*data.DiagnosticBundle) error {
log.Info().Msgf("attempting to export bundle using %s output...", config.output)
output, err := o.GetOutputByName(config.output)
if err != nil {
return err
}
frameErrors, err := output.Write(config.id, bundles, config.outputConfig)
// we report rather than fail hard on frame errors - it is up to the output to decide what is fatal via its returned error
for _, fError := range frameErrors.Errors {
log.Warn().Msgf("failure to write frame in output %s - %s", config.output, fError)
}
return err
}
func buildTraceBundle(collectorErrors [][]interface{}) *data.DiagnosticBundle {
errorBundle := data.DiagnosticBundle{
Frames: map[string]data.Frame{
"errors": data.NewMemoryFrame("errors", []string{"errors"}, collectorErrors),
},
Errors: data.FrameErrors{},
}
// add any other metrics from collection
return &errorBundle
}

View File

@ -1,130 +0,0 @@
//go:build !no_docker
package internal_test
import (
"context"
"fmt"
"io/ioutil"
"os"
"path"
"testing"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors"
_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/clickhouse"
_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/collectors/system"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs"
_ "github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/outputs/file"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/config"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
)
// Execute a full default capture with the simple output, and check that a non-empty bundle is produced
func TestCapture(t *testing.T) {
// create a ClickHouse container
ctx := context.Background()
cwd, err := os.Getwd()
if err != nil {
// can't test without the current directory
panic(err)
}
// the test server version defaults to "latest"; override it via the CLICKHOUSE_VERSION env var
req := testcontainers.ContainerRequest{
Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()),
ExposedPorts: []string{"9000/tcp"},
WaitingFor: wait.ForLog("Ready for connections"),
Mounts: testcontainers.ContainerMounts{
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../testdata/docker/custom.xml"),
},
Target: "/etc/clickhouse-server/config.d/custom.xml",
},
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../testdata/docker/admin.xml"),
},
Target: "/etc/clickhouse-server/users.d/admin.xml",
},
},
}
clickhouseContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
ContainerRequest: req,
Started: true,
})
if err != nil {
// can't test without container
panic(err)
}
p, _ := clickhouseContainer.MappedPort(ctx, "9000")
t.Setenv("CLICKHOUSE_DB_PORT", p.Port())
defer clickhouseContainer.Terminate(ctx) //nolint
tmpDir := t.TempDir()
port := p.Int()
// test a simple output exists
_, err = outputs.GetOutputByName("simple")
require.Nil(t, err)
// this relies on the simple output not changing its params - the test will likely fail if it does
outputConfig := config.Configuration{
Params: []config.ConfigParam{
config.StringParam{
Value: tmpDir,
Param: config.NewParam("directory", "Directory in which to create dump. Defaults to the current directory.", false),
},
config.StringOptions{
Value: "csv",
Options: []string{"csv"},
Param: config.NewParam("format", "Format of exported files", false),
},
config.BoolParam{
Value: true,
Param: config.NewParam("skip_archive", "Don't compress output to an archive", false),
},
},
}
// test default collectors
collectorNames := collectors.GetCollectorNames(true)
// grab all configs - only default will be used because of collectorNames
collectorConfigs, err := collectors.BuildConfigurationOptions()
require.Nil(t, err)
conf := internal.NewRunConfiguration("random", "localhost", uint16(port), "", "", "simple", outputConfig, collectorNames, collectorConfigs)
internal.Capture(conf)
outputDir := path.Join(tmrDir, "random")
require.DirExists(t, outputDir)
files, err := ioutil.ReadDir(outputDir)
require.Nil(t, err)
require.Len(t, files, 1)
outputDir = path.Join(outputDir, files[0].Name())
// check we have a folder per collector i.e. collectorNames + diag_trace
files, err = ioutil.ReadDir(outputDir)
require.Nil(t, err)
require.Len(t, files, len(collectorNames)+1)
expectedFolders := append(collectorNames, "diag_trace")
for _, file := range files {
require.True(t, file.IsDir())
require.True(t, utils.Contains(expectedFolders, file.Name()))
}
// we don't test the specific collector outputs but make sure something was written to system
systemFolder := path.Join(outputDir, "system")
files, err = ioutil.ReadDir(systemFolder)
require.Nil(t, err)
require.Greater(t, len(files), 0)
// test diag_trace
diagFolder := path.Join(outputDir, "diag_trace")
files, err = ioutil.ReadDir(diagFolder)
require.Nil(t, err)
require.Equal(t, 1, len(files))
require.FileExists(t, path.Join(diagFolder, "errors.csv"))
}

View File

@ -1,8 +0,0 @@
<clickhouse>
<network_max>5000000</network_max>
<test_profile>
<test_p>
</test_p>
</test_profile>
<pg_port>9008</pg_port>
</clickhouse>

View File

@ -1,20 +0,0 @@
<clickhouse>
<test_user>
<networks>
<ip>::/0</ip>
</networks>
<profile>default</profile>
<quota>default</quota>
<password_sha256_hex>REPLACE_ME</password_sha256_hex>
<access_management>1</access_management>
</test_user>
<another_user>
<networks>
<ip>::/0</ip>
</networks>
<profile>default</profile>
<quota>default</quota>
<passwird>REPLACE_ME</passwird>
<access_management>1</access_management>
</another_user>
</clickhouse>

View File

@ -1 +0,0 @@
network_max: 5000000

View File

@ -1,7 +0,0 @@
test_user:
password: 'REPLACE_ME'
networks:
ip: '::/0'
profile: default
quota: default
access_management: 1

File diff suppressed because it is too large

View File

@ -1,8 +0,0 @@
<clickhouse>
<users>
<default>
<password remove="1"/>
<password_sha256_hex>REPLACE_ME</password_sha256_hex>
</default>
</users>
</clickhouse>

View File

@ -1,57 +0,0 @@
<clickhouse>
<!-- See also the files in users.d directory where the settings can be overridden. -->
<!-- Profiles of settings. -->
<include_from>../include/xml/user-include.xml</include_from>
<profiles>
<!-- Default settings. -->
<default>
<!-- Maximum memory usage for processing single query, in bytes. -->
<max_memory_usage>10000000000</max_memory_usage>
<load_balancing>random</load_balancing>
<log_query_threads>1</log_query_threads>
</default>
<!-- Profile that allows only read queries. -->
<readonly>
<readonly>1</readonly>
</readonly>
</profiles>
<!-- Users and ACL. -->
<users>
<test_user>
<include incl="test_user"></include>
</test_user>
<!-- If user name was not specified, 'default' user is used. -->
<default>
<password>REPLACE_ME</password>
<networks>
<ip>::/0</ip>
</networks>
<!-- Settings profile for user. -->
<profile>default</profile>
<!-- Quota for user. -->
<quota>default</quota>
<!-- User can create other users and grant rights to them. -->
<!-- <access_management>1</access_management> -->
</default>
</users>
<!-- Quotas. -->
<quotas>
<!-- Name of quota. -->
<default>
<!-- Limits for time interval. You could specify many intervals with different limits. -->
<interval>
<!-- Length of interval. -->
<duration>3600</duration>
<!-- No limits. Just calculate resource usage for time interval. -->
<queries>0</queries>
<errors>0</errors>
<result_rows>0</result_rows>
<read_rows>0</read_rows>
<execution_time>0</execution_time>
</interval>
</default>
</quotas>
</clickhouse>

View File

@ -1,927 +0,0 @@
# This is an example of a configuration file "config.xml" rewritten in YAML
# You can read this documentation for detailed information about YAML configuration:
# https://clickhouse.com/docs/en/operations/configuration-files/
# NOTE: User and query level settings are set up in "users.yaml" file.
# If you have accidentally specified user-level settings here, server won't start.
# You can either move the settings to the right place inside the "users.yaml" file
# or add skip_check_for_incorrect_settings: 1 here.
include_from: "../include/yaml/server-include.yaml"
logger:
# Possible levels [1]:
# - none (turns off logging)
# - fatal
# - critical
# - error
# - warning
# - notice
# - information
# - debug
# - trace
# [1]: https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/Logger.h#L105-L114
level: trace
log: /var/log/clickhouse-server/clickhouse-server.log
errorlog: /var/log/clickhouse-server/clickhouse-server.err.log
# Rotation policy
# See https://github.com/pocoproject/poco/blob/poco-1.9.4-release/Foundation/include/Poco/FileChannel.h#L54-L85
size: 1000M
count: 10
# console: 1
# Default behavior is autodetection (log to console if not daemon mode and is tty)
# Per level overrides (legacy):
# For example to suppress logging of the ConfigReloader you can use:
# NOTE: levels.logger is reserved, see below.
# levels:
# ConfigReloader: none
# Per level overrides:
# For example to suppress logging of the RBAC for default user you can use:
# (But please note that the logger name may change from version to version, even after a minor upgrade)
# levels:
# - logger:
# name: 'ContextAccess (default)'
# level: none
# - logger:
# name: 'DatabaseOrdinary (test)'
# level: none
# It is the name that will be shown in the clickhouse-client.
# By default, anything with "production" will be highlighted in red in query prompt.
# display_name: production
# Port for HTTP API. See also 'https_port' for secure connections.
# This interface is also used by ODBC and JDBC drivers (DataGrip, Dbeaver, ...)
# and by most of web interfaces (embedded UI, Grafana, Redash, ...).
http_port: 8123
# Port for interaction by native protocol with:
# - clickhouse-client and other native ClickHouse tools (clickhouse-benchmark);
# - clickhouse-server with other clickhouse-servers for distributed query processing;
# - ClickHouse drivers and applications supporting native protocol
# (this protocol is also informally called as "the TCP protocol");
# See also 'tcp_port_secure' for secure connections.
tcp_port: 9000
# Compatibility with MySQL protocol.
# ClickHouse will pretend to be MySQL for applications connecting to this port.
mysql_port: 9004
# Compatibility with PostgreSQL protocol.
# ClickHouse will pretend to be PostgreSQL for applications connecting to this port.
postgresql_port: 9005
# HTTP API with TLS (HTTPS).
# You have to configure certificate to enable this interface.
# See the openSSL section below.
# https_port: 8443
# Native interface with TLS.
# You have to configure certificate to enable this interface.
# See the openSSL section below.
# tcp_port_secure: 9440
# Native interface wrapped with PROXYv1 protocol
# PROXYv1 header sent for every connection.
# ClickHouse will extract information about proxy-forwarded client address from the header.
# tcp_with_proxy_port: 9011
# Port for communication between replicas. Used for data exchange.
# It provides low-level data access between servers.
# This port should not be accessible from untrusted networks.
# See also 'interserver_http_credentials'.
# Data transferred over connections to this port should not go through untrusted networks.
# See also 'interserver_https_port'.
interserver_http_port: 9009
# Port for communication between replicas with TLS.
# You have to configure certificate to enable this interface.
# See the openSSL section below.
# See also 'interserver_http_credentials'.
# interserver_https_port: 9010
# Hostname that is used by other replicas to request this server.
# If not specified, it is determined analogously to the 'hostname -f' command.
# This setting could be used to switch replication to another network interface
# (the server may be connected to multiple networks via multiple addresses)
# interserver_http_host: example.yandex.ru
# You can specify credentials for authentication between replicas.
# This is required when interserver_https_port is accessible from untrusted networks,
# and also recommended to avoid SSRF attacks from possibly compromised services in your network.
# interserver_http_credentials:
# user: interserver
# password: ''
# Listen specified address.
# Use :: (wildcard IPv6 address), if you want to accept connections both with IPv4 and IPv6 from everywhere.
# Notes:
# If you open connections from wildcard address, make sure that at least one of the following measures applied:
# - server is protected by firewall and not accessible from untrusted networks;
# - all users are restricted to subset of network addresses (see users.xml);
# - all users have strong passwords, only secure (TLS) interfaces are accessible, or connections are only made via TLS interfaces.
# - users without password have readonly access.
# See also: https://www.shodan.io/search?query=clickhouse
# listen_host: '::'
# Same for hosts without support for IPv6:
# listen_host: 0.0.0.0
# Default values - try to listen on localhost for IPv4 and IPv6.
# listen_host: '::1'
# listen_host: 127.0.0.1
# Don't exit if IPv6 or IPv4 networks are unavailable while trying to listen.
# listen_try: 0
# Allow multiple servers to listen on the same address:port. This is not recommended.
# listen_reuse_port: 0
# listen_backlog: 64
max_connections: 4096
# For 'Connection: keep-alive' in HTTP 1.1
keep_alive_timeout: 3
# gRPC protocol (see src/Server/grpc_protos/clickhouse_grpc.proto for the API)
# grpc_port: 9100
grpc:
enable_ssl: false
# The following two files are used only if enable_ssl=1
ssl_cert_file: /path/to/ssl_cert_file
ssl_key_file: /path/to/ssl_key_file
# Whether server will request client for a certificate
ssl_require_client_auth: false
# The following file is used only if ssl_require_client_auth=1
ssl_ca_cert_file: /path/to/ssl_ca_cert_file
# Default compression algorithm (applied if client doesn't specify another algorithm).
# Supported algorithms: none, deflate, gzip, stream_gzip
compression: deflate
# Default compression level (applied if client doesn't specify another level).
# Supported levels: none, low, medium, high
compression_level: medium
# Send/receive message size limits in bytes. -1 means unlimited
max_send_message_size: -1
max_receive_message_size: -1
# Enable if you want very detailed logs
verbose_logs: false
# Used with https_port and tcp_port_secure. Full ssl options list: https://github.com/ClickHouse-Extras/poco/blob/master/NetSSL_OpenSSL/include/Poco/Net/SSLManager.h#L71
openSSL:
server:
# Used for https server AND secure tcp port
# openssl req -subj "/CN=localhost" -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout /etc/clickhouse-server/server.key -out /etc/clickhouse-server/server.crt
certificateFile: /etc/clickhouse-server/server.crt
privateKeyFile: /etc/clickhouse-server/server.key
# dhparams are optional. You can delete the dhParamsFile: element.
# To generate dhparams, use the following command:
# openssl dhparam -out /etc/clickhouse-server/dhparam.pem 4096
# Only file format with BEGIN DH PARAMETERS is supported.
dhParamsFile: /etc/clickhouse-server/dhparam.pem
verificationMode: none
loadDefaultCAFile: true
cacheSessions: true
disableProtocols: 'sslv2,sslv3'
preferServerCiphers: true
client:
# Used for connecting to https dictionary source and secured Zookeeper communication
loadDefaultCAFile: true
cacheSessions: true
disableProtocols: 'sslv2,sslv3'
preferServerCiphers: true
# Use for self-signed: verificationMode: none
invalidCertificateHandler:
# Use for self-signed: name: AcceptCertificateHandler
name: RejectCertificateHandler
# Default root page on http[s] server. For example load UI from https://tabix.io/ when opening http://localhost:8123
# http_server_default_response: |-
# <html ng-app="SMI2"><head><base href="http://ui.tabix.io/"></head><body><div ui-view="" class="content-ui"></div><script src="http://loader.tabix.io/master.js"></script></body></html>
# Maximum number of concurrent queries.
max_concurrent_queries: 100
# Maximum memory usage (resident set size) for server process.
# Zero value or unset means default. Default is "max_server_memory_usage_to_ram_ratio" of available physical RAM.
# If the value is larger than "max_server_memory_usage_to_ram_ratio" of available physical RAM, it will be cut down.
# The constraint is checked at query execution time.
# If a query tries to allocate memory and the current memory usage plus the allocation is greater
# than the specified threshold, an exception will be thrown.
# It is not practical to set this constraint to small values like just a few gigabytes,
# because the memory allocator will keep that amount of memory in caches and the server will deny service to queries.
max_server_memory_usage: 0
# Maximum number of threads in the Global thread pool.
# This will default to a maximum of 10000 threads if not specified.
# This setting will be useful in scenarios where there are a large number
# of distributed queries that are running concurrently but are idling most
# of the time, in which case a higher number of threads might be required.
max_thread_pool_size: 10000
# In memory-constrained environments, you may have to set this to a value larger than 1.
max_server_memory_usage_to_ram_ratio: 0.9
# Simple server-wide memory profiler. Collect a stack trace at every peak allocation step (in bytes).
# Data will be stored in system.trace_log table with query_id = empty string.
# Zero means disabled.
total_memory_profiler_step: 4194304
# Collect random allocations and deallocations and write them into system.trace_log with 'MemorySample' trace_type.
# The probability applies to every alloc/free regardless of the size of the allocation.
# Note that sampling happens only when the amount of untracked memory exceeds the untracked memory limit,
# which is 4 MiB by default but can be lowered if 'total_memory_profiler_step' is lowered.
# You may want to set 'total_memory_profiler_step' to 1 for extra-fine-grained sampling.
total_memory_tracker_sample_probability: 0
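# For example, to sample roughly 1% of all allocations at the finest granularity, the two settings
# above could be combined like this (a sketch, not enabled here):
# total_memory_profiler_step: 1
# total_memory_tracker_sample_probability: 0.01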
# Set a limit on the number of open files (default: maximum). This setting makes sense on Mac OS X because getrlimit() fails to retrieve
# the correct maximum value.
# max_open_files: 262144
# Size of cache of uncompressed blocks of data, used in tables of MergeTree family.
# In bytes. There is a single cache for the whole server. Memory is allocated only on demand.
# The cache is used when the 'use_uncompressed_cache' user setting is turned on (off by default).
# Uncompressed cache is advantageous only for very short queries and in rare cases.
# Note: uncompressed cache can be pointless for lz4, because memory bandwidth
# is slower than multi-core decompression on some server configurations.
# Enabling it can sometimes paradoxically make queries slower.
uncompressed_cache_size: 8589934592
# Approximate size of mark cache, used in tables of MergeTree family.
# In bytes. There is a single cache for the whole server. Memory is allocated only on demand.
# You should not lower this value.
mark_cache_size: 5368709120
# If you enable the `min_bytes_to_use_mmap_io` setting,
# the data in MergeTree tables can be read with mmap to avoid copying from kernel to userspace.
# It makes sense only for large files and helps only if data reside in page cache.
# To avoid frequent open/mmap/munmap/close calls (which are very expensive due to the subsequent page faults)
# and to reuse mappings from several threads and queries,
# the cache of mapped files is maintained. Its size is the number of mapped regions (usually equal to the number of mapped files).
# The amount of data in mapped files can be monitored
# in system.metrics, system.metric_log by the MMappedFiles, MMappedFileBytes metrics
# and in system.asynchronous_metrics, system.asynchronous_metrics_log by the MMapCacheCells metric,
# and also in system.events, system.processes, system.query_log, system.query_thread_log, system.query_views_log by the
# CreatedReadBufferMMap, CreatedReadBufferMMapFailed, MMappedFileCacheHits, MMappedFileCacheMisses events.
# Note that the amount of data in mapped files does not consume memory directly and is not accounted
# in query or server memory usage - because this memory can be discarded similar to OS page cache.
# The cache is dropped (the files are closed) automatically on removal of old parts in MergeTree,
# it can also be dropped manually with the SYSTEM DROP MMAP CACHE query.
mmap_cache_size: 1000
# Cache size in bytes for compiled expressions.
compiled_expression_cache_size: 134217728
# Cache size in elements for compiled expressions.
compiled_expression_cache_elements_size: 10000
# Path to data directory, with trailing slash.
path: /var/lib/clickhouse/
# Path to temporary data used for processing heavy queries.
tmp_path: /var/lib/clickhouse/tmp/
# Policy from the <storage_configuration> for the temporary files.
# If not set, <tmp_path> is used; otherwise, <tmp_path> is ignored.
# Notes:
# - move_factor is ignored
# - keep_free_space_bytes is ignored
# - max_data_part_size_bytes is ignored
# - you must have exactly one volume in that policy
# tmp_policy: tmp
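# A hypothetical single-volume policy satisfying the notes above could look like this
# (the disk path and all names are placeholders; the policy lives under storage_configuration):
# storage_configuration:
#     disks:
#         tmp_ssd:
#             path: /mnt/fast_ssd/clickhouse_tmp/
#     policies:
#         tmp:
#             volumes:
#                 main:
#                     disk: tmp_ssd
# tmp_policy: tmp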
# Directory with user-provided files that are accessible to the 'file' table function.
user_files_path: /var/lib/clickhouse/user_files/
# LDAP server definitions.
ldap_servers: ''
# List LDAP servers with their connection parameters here in order to later 1) use them as authenticators for dedicated local users,
# who have the 'ldap' authentication mechanism specified instead of 'password', or 2) use them as remote user directories.
# Parameters:
# host - LDAP server hostname or IP, this parameter is mandatory and cannot be empty.
# port - LDAP server port, default is 636 if enable_tls is set to true, 389 otherwise.
# bind_dn - template used to construct the DN to bind to.
# The resulting DN will be constructed by replacing all '{user_name}' substrings of the template with the actual
# user name during each authentication attempt.
# user_dn_detection - section with LDAP search parameters for detecting the actual user DN of the bound user.
# This is mainly used in search filters for further role mapping when the server is Active Directory. The
# resulting user DN will be used when replacing '{user_dn}' substrings wherever they are allowed. By default,
# the user DN is set equal to the bind DN, but once the search is performed, it will be updated to the actual
# detected user DN value.
# base_dn - template used to construct the base DN for the LDAP search.
# The resulting DN will be constructed by replacing all '{user_name}' and '{bind_dn}' substrings
# of the template with the actual user name and bind DN during the LDAP search.
# scope - scope of the LDAP search.
# Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
# search_filter - template used to construct the search filter for the LDAP search.
# The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', and '{base_dn}'
# substrings of the template with the actual user name, bind DN, and base DN during the LDAP search.
# Note that special characters must be properly escaped in XML.
# verification_cooldown - a period of time, in seconds, after a successful bind attempt, during which a user will be assumed
# to be successfully authenticated for all consecutive requests without contacting the LDAP server.
# Specify 0 (the default) to disable caching and force contacting the LDAP server for each authentication request.
# enable_tls - flag to trigger use of secure connection to the LDAP server.
# Specify 'no' for plain text (ldap://) protocol (not recommended).
# Specify 'yes' for LDAP over SSL/TLS (ldaps://) protocol (recommended, the default).
# Specify 'starttls' for legacy StartTLS protocol (plain text (ldap://) protocol, upgraded to TLS).
# tls_minimum_protocol_version - the minimum protocol version of SSL/TLS.
# Accepted values are: 'ssl2', 'ssl3', 'tls1.0', 'tls1.1', 'tls1.2' (the default).
# tls_require_cert - SSL/TLS peer certificate verification behavior.
# Accepted values are: 'never', 'allow', 'try', 'demand' (the default).
# tls_cert_file - path to certificate file.
# tls_key_file - path to certificate key file.
# tls_ca_cert_file - path to CA certificate file.
# tls_ca_cert_dir - path to the directory containing CA certificates.
# tls_cipher_suite - allowed cipher suite (in OpenSSL notation).
# Example:
# my_ldap_server:
# host: localhost
# port: 636
# bind_dn: 'uid={user_name},ou=users,dc=example,dc=com'
# verification_cooldown: 300
# enable_tls: yes
# tls_minimum_protocol_version: tls1.2
# tls_require_cert: demand
# tls_cert_file: /path/to/tls_cert_file
# tls_key_file: /path/to/tls_key_file
# tls_ca_cert_file: /path/to/tls_ca_cert_file
# tls_ca_cert_dir: /path/to/tls_ca_cert_dir
# tls_cipher_suite: ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:AES256-GCM-SHA384
# Example (typical Active Directory with configured user DN detection for further role mapping):
# my_ad_server:
# host: localhost
# port: 389
# bind_dn: 'EXAMPLE\{user_name}'
# user_dn_detection:
# base_dn: CN=Users,DC=example,DC=com
# search_filter: '(&amp;(objectClass=user)(sAMAccountName={user_name}))'
# enable_tls: no
# To enable Kerberos authentication support for HTTP requests (GSS-SPNEGO), for those users who are explicitly configured
# to authenticate via Kerberos, define a single 'kerberos' section here.
# Parameters:
# principal - canonical service principal name that will be acquired and used when accepting security contexts.
# This parameter is optional, if omitted, the default principal will be used.
# This parameter cannot be specified together with 'realm' parameter.
# realm - a realm that will be used to restrict authentication to only those requests whose initiator's realm matches it.
# This parameter is optional, if omitted, no additional filtering by realm will be applied.
# This parameter cannot be specified together with 'principal' parameter.
# Example:
# kerberos: ''
# Example:
# kerberos:
# principal: HTTP/clickhouse.example.com@EXAMPLE.COM
# Example:
# kerberos:
# realm: EXAMPLE.COM
# Sources to read users, roles, access rights, profiles of settings, quotas.
user_directories:
users_xml:
# Path to configuration file with predefined users.
path: users.yaml
local_directory:
# Path to folder where users created by SQL commands are stored.
path: /var/lib/clickhouse/access/
# # To add an LDAP server as a remote user directory of users that are not defined locally, define a single 'ldap' section
# # with the following parameters:
# # server - one of LDAP server names defined in 'ldap_servers' config section above.
# # This parameter is mandatory and cannot be empty.
# # roles - section with a list of locally defined roles that will be assigned to each user retrieved from the LDAP server.
# # If no roles are specified here or assigned during role mapping (below), the user will not be able to perform any
# # actions after authentication.
# # role_mapping - section with LDAP search parameters and mapping rules.
# # When a user authenticates, while still bound to LDAP, an LDAP search is performed using search_filter and the
# # name of the logged-in user. For each entry found during that search, the value of the specified attribute is
# # extracted. For each attribute value that has the specified prefix, the prefix is removed, and the rest of the
# # value becomes the name of a local role defined in ClickHouse, which is expected to be created beforehand by
# # CREATE ROLE command.
# # There can be multiple 'role_mapping' sections defined inside the same 'ldap' section. All of them will be
# # applied.
# # base_dn - template used to construct the base DN for the LDAP search.
# # The resulting DN will be constructed by replacing all '{user_name}', '{bind_dn}', and '{user_dn}'
# # substrings of the template with the actual user name, bind DN, and user DN during each LDAP search.
# # scope - scope of the LDAP search.
# # Accepted values are: 'base', 'one_level', 'children', 'subtree' (the default).
# # search_filter - template used to construct the search filter for the LDAP search.
# # The resulting filter will be constructed by replacing all '{user_name}', '{bind_dn}', '{user_dn}', and
# # '{base_dn}' substrings of the template with the actual user name, bind DN, user DN, and base DN during
# # each LDAP search.
# # Note that special characters must be properly escaped in XML.
# # attribute - attribute name whose values will be returned by the LDAP search. 'cn', by default.
# # prefix - a prefix that is expected to be in front of each string in the original list of strings returned by
# # the LDAP search. The prefix will be removed from the original strings, and the resulting strings will be treated
# # as local role names. Empty by default.
# # Example:
# # ldap:
# # server: my_ldap_server
# # roles:
# # my_local_role1: ''
# # my_local_role2: ''
# # role_mapping:
# # base_dn: 'ou=groups,dc=example,dc=com'
# # scope: subtree
# # search_filter: '(&amp;(objectClass=groupOfNames)(member={bind_dn}))'
# # attribute: cn
# # prefix: clickhouse_
# # Example (typical Active Directory with role mapping that relies on the detected user DN):
# # ldap:
# # server: my_ad_server
# # role_mapping:
# # base_dn: 'CN=Users,DC=example,DC=com'
# # attribute: CN
# # scope: subtree
# # search_filter: '(&amp;(objectClass=group)(member={user_dn}))'
# # prefix: clickhouse_
# Default profile of settings.
default_profile: default
# Comma-separated list of prefixes for user-defined settings.
# custom_settings_prefixes: ''
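# For example (a sketch; the prefix value is arbitrary), with the line below users could run
# SET custom_static_weight = 10 and read the value back with getSetting('custom_static_weight'):
# custom_settings_prefixes: 'custom_'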
# System profile of settings. These settings are used by internal processes (Distributed DDL worker and so on).
# system_profile: default
# Buffer profile of settings.
# These settings are used by Buffer storage to flush data to the underlying table.
# Default: used from system_profile directive.
# buffer_profile: default
# Default database.
default_database: default
# Server time zone could be set here.
# Time zone is used when converting between String and DateTime types,
# when printing DateTime in text formats and parsing DateTime from text,
# it is used in date and time related functions, if specific time zone was not passed as an argument.
# Time zone is specified as identifier from IANA time zone database, like UTC or Africa/Abidjan.
# If not specified, system time zone at server startup is used.
# Please note that the server could display a time zone alias instead of the specified name.
# Example: W-SU is an alias for Europe/Moscow and Zulu is an alias for UTC.
# timezone: Europe/Moscow
# You can specify umask here (see "man umask"). Server will apply it on startup.
# Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read).
# umask: 022
# Perform mlockall after startup to lower the latency of the first queries
# and to prevent clickhouse executable from being paged out under high IO load.
# Enabling this option is recommended but will lead to increased startup time for up to a few seconds.
mlock_executable: true
# Reallocate memory for machine code ("text") using huge pages. Highly experimental.
remap_executable: false
# Uncomment below in order to use JDBC table engine and function.
# To install and run JDBC bridge in background:
# * [Debian/Ubuntu]
# export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge
# export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
# wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
# apt install --no-install-recommends -f ./clickhouse-jdbc-bridge_$PKG_VER-1_all.deb
# clickhouse-jdbc-bridge &
# * [CentOS/RHEL]
# export MVN_URL=https://repo1.maven.org/maven2/ru/yandex/clickhouse/clickhouse-jdbc-bridge
# export PKG_VER=$(curl -sL $MVN_URL/maven-metadata.xml | grep '<release>' | sed -e 's|.*>\(.*\)<.*|\1|')
# wget https://github.com/ClickHouse/clickhouse-jdbc-bridge/releases/download/v$PKG_VER/clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
# yum localinstall -y clickhouse-jdbc-bridge-$PKG_VER-1.noarch.rpm
# clickhouse-jdbc-bridge &
# Please refer to https://github.com/ClickHouse/clickhouse-jdbc-bridge#usage for more information.
# jdbc_bridge:
# host: 127.0.0.1
# port: 9019
# Configuration of clusters that could be used in Distributed tables.
# https://clickhouse.com/docs/en/operations/table_engines/distributed/
remote_servers:
# Test only shard config for testing distributed storage
test_shard_localhost:
# Inter-server per-cluster secret for Distributed queries
# default: no secret (no authentication will be performed)
# If set, then Distributed queries will be validated on shards, so at least:
# - such cluster should exist on the shard,
# - such cluster should have the same secret.
# Also (and more importantly), the initial_user will
# be used as the current user for the query.
# Right now the protocol is pretty simple and it only takes into account:
# - cluster name
# - query
# It would also be nice if the following were implemented:
# - source hostname (see interserver_http_host), but then it would depend on DNS;
# an IP address could be used instead, but then you would need to get it right on the initiator node.
# - target hostname / ip address (same notes as for source hostname)
# - time-based security tokens
secret: 'REPLACE_ME'
shard:
# Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas).
# internal_replication: false
# Optional. Shard weight when writing data. Default: 1.
# weight: 1
replica:
host: localhost
port: 9000
# Optional. Priority of the replica for load_balancing. Default: 1 (a lower value means higher priority).
# priority: 1
# The list of hosts allowed for use in URL-related storage engines and table functions.
# If this section is not present in configuration, all hosts are allowed.
# remote_url_allow_hosts:
# The host should be specified exactly as in the URL. The name is checked before DNS resolution.
# Example: "yandex.ru", "yandex.ru." and "www.yandex.ru" are different hosts.
# If a port is explicitly specified in the URL, the host:port is checked as a whole.
# If a host is specified here without a port, any port for this host is allowed.
# "yandex.ru" -> "yandex.ru:443", "yandex.ru:80", etc. are allowed, but "yandex.ru:80" -> only "yandex.ru:80" is allowed.
# If the host is specified as IP address, it is checked as specified in URL. Example: "[2a02:6b8:a::a]".
# If there are redirects and support for redirects is enabled, every redirect (the Location field) is checked.
# A regular expression can be specified. The RE2 engine is used for regexps.
# Regexps are not anchored: don't forget to add ^ and $. Also don't forget to escape the dot (.) metacharacter
# (forgetting to do so is a common source of errors).
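# A minimal sketch of an allow list (the 'host' and 'host_regexp' element names are assumed
# from the XML form of this config, and the host values are placeholders):
# remote_url_allow_hosts:
#     host: 'www.myhost.com'
#     host_regexp: '^.*\.myhost\.com$'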
# If an element has an 'incl' attribute, the corresponding substitution from another file will be used as its value.
# By default, the path to the file with substitutions is /etc/metrika.xml. It can be changed via the 'include_from' element in the config.
# Values for substitutions are specified in /clickhouse/name_of_substitution elements in that file.
# ZooKeeper is used to store metadata about replicas when using Replicated tables.
# Optional. If you don't use replicated tables, you can omit this.
# See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
# zookeeper:
# - node:
# host: example1
# port: 2181
# - node:
# host: example2
# port: 2181
# - node:
# host: example3
# port: 2181
# Substitutions for parameters of replicated tables.
# Optional. If you don't use replicated tables, you can omit this.
# See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/#creating-replicated-tables
# macros:
# shard: 01
# replica: example01-01-1
# Reloading interval for embedded dictionaries, in seconds. Default: 3600.
builtin_dictionaries_reload_interval: 3600
# Maximum session timeout, in seconds. Default: 3600.
max_session_timeout: 3600
# Default session timeout, in seconds. Default: 60.
default_session_timeout: 60
# Sending data to Graphite for monitoring. Several sections can be defined.
# interval - send every X seconds
# root_path - prefix for keys
# hostname_in_path - append hostname to root_path (default = true)
# metrics - send data from table system.metrics
# events - send data from table system.events
# asynchronous_metrics - send data from table system.asynchronous_metrics
# graphite:
# host: localhost
# port: 42000
# timeout: 0.1
# interval: 60
# root_path: one_min
# hostname_in_path: true
# metrics: true
# events: true
# events_cumulative: false
# asynchronous_metrics: true
# graphite:
# host: localhost
# port: 42000
# timeout: 0.1
# interval: 1
# root_path: one_sec
# metrics: true
# events: true
# events_cumulative: false
# asynchronous_metrics: false
# Serve an endpoint for Prometheus monitoring.
# endpoint - metrics path (relative to root, starting with "/")
# port - port to run the server on. If not defined or 0, http_port is used
# metrics - send data from table system.metrics
# events - send data from table system.events
# asynchronous_metrics - send data from table system.asynchronous_metrics
# prometheus:
# endpoint: /metrics
# port: 9363
# metrics: true
# events: true
# asynchronous_metrics: true
# Query log. Used only for queries with setting log_queries = 1.
query_log:
# The table to insert data into. If the table does not exist, it will be created.
# When the query log structure changes after a system update,
# the old table will be renamed and a new table will be created automatically.
database: system
table: query_log
# PARTITION BY expr: https://clickhouse.com/docs/en/table_engines/mergetree-family/custom_partitioning_key/
# Example:
# event_date
# toMonday(event_date)
# toYYYYMM(event_date)
# toStartOfHour(event_time)
partition_by: toYYYYMM(event_date)
# Table TTL specification: https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/mergetree/#mergetree-table-ttl
# Example:
# event_date + INTERVAL 1 WEEK
# event_date + INTERVAL 7 DAY DELETE
# event_date + INTERVAL 2 WEEK TO DISK 'bbb'
# ttl: 'event_date + INTERVAL 30 DAY DELETE'
# Instead of partition_by, you can provide full engine expression (starting with ENGINE = ) with parameters,
# Example: engine: 'ENGINE = MergeTree PARTITION BY toYYYYMM(event_date) ORDER BY (event_date, event_time) SETTINGS index_granularity = 1024'
# Interval of flushing data.
flush_interval_milliseconds: 7500
# Trace log. Stores stack traces collected by query profilers.
# See query_profiler_real_time_period_ns and query_profiler_cpu_time_period_ns settings.
trace_log:
database: system
table: trace_log
partition_by: toYYYYMM(event_date)
flush_interval_milliseconds: 7500
# Query thread log. Has information about all threads that participated in query execution.
# Used only for queries with setting log_query_threads = 1.
query_thread_log:
database: system
table: query_thread_log
partition_by: toYYYYMM(event_date)
flush_interval_milliseconds: 7500
# Query views log. Has information about all dependent views associated with a query.
# Used only for queries with setting log_query_views = 1.
query_views_log:
database: system
table: query_views_log
partition_by: toYYYYMM(event_date)
flush_interval_milliseconds: 7500
# Uncomment to use the part log.
# Part log contains information about all actions with parts in MergeTree tables (creation, deletion, merges, downloads).
part_log:
database: system
table: part_log
partition_by: toYYYYMM(event_date)
flush_interval_milliseconds: 7500
# Uncomment to write the text log into a table.
# The text log contains all information from the usual server log but stores it in a structured and efficient way.
# The level of the messages that go to the table can be limited (<level>); if not specified, all messages will go to the table.
# text_log:
# database: system
# table: text_log
# flush_interval_milliseconds: 7500
# level: ''
# The metric log contains rows with current values of ProfileEvents and CurrentMetrics, collected at the "collect_interval_milliseconds" interval.
metric_log:
database: system
table: metric_log
flush_interval_milliseconds: 7500
collect_interval_milliseconds: 1000
# Asynchronous metric log contains values of metrics from
# system.asynchronous_metrics.
asynchronous_metric_log:
database: system
table: asynchronous_metric_log
# Asynchronous metrics are updated once a minute, so there is
# no need to flush more often.
flush_interval_milliseconds: 60000
# OpenTelemetry log contains OpenTelemetry trace spans.
opentelemetry_span_log:
# The default table creation code is insufficient; this <engine> spec
# is a workaround. There is no 'event_time' for this log, but two times,
# start and finish. It is sorted by finish time, to avoid inserting
# data too far away in the past (probably we can sometimes insert a span
# that is seconds earlier than the last span in the table, due to a race
# between several spans inserted in parallel). This gives the spans a
# global order that we can use to e.g. retry insertion into some external
# system.
engine: |-
engine MergeTree
partition by toYYYYMM(finish_date)
order by (finish_date, finish_time_us, trace_id)
database: system
table: opentelemetry_span_log
flush_interval_milliseconds: 7500
# Crash log. Stores stack traces for fatal errors.
# This table is normally empty.
crash_log:
database: system
table: crash_log
partition_by: ''
flush_interval_milliseconds: 1000
# Parameters for embedded dictionaries, used in Yandex.Metrica.
# See https://clickhouse.com/docs/en/dicts/internal_dicts/
# Path to file with region hierarchy.
# path_to_regions_hierarchy_file: /opt/geo/regions_hierarchy.txt
# Path to directory with files containing names of regions
# path_to_regions_names_files: /opt/geo/
# top_level_domains_path: /var/lib/clickhouse/top_level_domains/
# Custom TLD lists.
# Format: name: /path/to/file
# Changes will not be applied without a server restart.
# Path to the list is under top_level_domains_path (see above).
top_level_domains_lists: ''
# public_suffix_list: /path/to/public_suffix_list.dat
# Configuration of external dictionaries. See:
# https://clickhouse.com/docs/en/sql-reference/dictionaries/external-dictionaries/external-dicts
dictionaries_config: '*_dictionary.xml'
# Uncomment if you want data to be compressed 30-100% better.
# Don't do that if you just started using ClickHouse.
# compression:
# # Set of variants. Checked in order. Last matching case wins. If nothing matches, lz4 will be used.
# case:
# # Conditions. All must be satisfied. Some conditions may be omitted.
# # min_part_size: 10000000000 # Min part size in bytes.
# # min_part_size_ratio: 0.01 # Min size of part relative to whole table size.
# # What compression method to use.
# method: zstd
# Allow executing distributed DDL queries (CREATE, DROP, ALTER, RENAME) on a cluster.
# Works only if ZooKeeper is enabled. Comment it if such functionality isn't required.
distributed_ddl:
# Path in ZooKeeper to queue with DDL queries
path: /clickhouse/task_queue/ddl
# Settings from this profile will be used to execute DDL queries
# profile: default
# Controls how many ON CLUSTER queries can be run simultaneously.
# pool_size: 1
# Cleanup settings (active tasks will not be removed)
# Controls task TTL (default 1 week)
# task_max_lifetime: 604800
# Controls how often cleanup should be performed (in seconds)
# cleanup_delay_period: 60
# Controls how many tasks could be in the queue
# max_tasks_in_queue: 1000
# Settings to fine-tune MergeTree tables. See the documentation in the source code, in MergeTreeSettings.h
# merge_tree:
# max_suspicious_broken_parts: 5
# Protection from accidental DROP.
# If the size of a MergeTree table is greater than max_table_size_to_drop (in bytes), the table cannot be dropped with any DROP query.
# If you want to delete one table and don't want to change the clickhouse-server config, you can create the special file <clickhouse-path>/flags/force_drop_table and perform the DROP once.
# By default max_table_size_to_drop is 50 GB; max_table_size_to_drop=0 allows dropping any table.
# The same applies to max_partition_size_to_drop.
# Uncomment to disable protection.
# max_table_size_to_drop: 0
# max_partition_size_to_drop: 0
# Example of parameters for GraphiteMergeTree table engine
graphite_rollup_example:
pattern:
regexp: click_cost
function: any
retention:
- age: 0
precision: 3600
- age: 86400
precision: 60
default:
function: max
retention:
- age: 0
precision: 60
- age: 3600
precision: 300
- age: 86400
precision: 3600
# Directory in <clickhouse-path> containing schema files for various input formats.
# The directory will be created if it doesn't exist.
format_schema_path: /var/lib/clickhouse/format_schemas/
# Default query masking rules; matching lines will be replaced with something else in the logs
# (both text logs and system.query_log).
# name - name for the rule (optional)
# regexp - RE2 compatible regular expression (mandatory)
# replace - substitution string for sensitive data (optional, by default - six asterisks)
query_masking_rules:
rule:
name: hide encrypt/decrypt arguments
regexp: '((?:aes_)?(?:encrypt|decrypt)(?:_mysql)?)\s*\(\s*(?:''(?:\\''|.)+''|.*?)\s*\)'
# or more secure, but also more invasive:
# (aes_\w+)\s*\(.*\)
replace: \1(???)
# Uncomment to use custom http handlers.
# Rules are checked from top to bottom; the first match runs the handler.
# url - to match the request URL; you can use the 'regex:' prefix for regex matching (optional)
# methods - to match the request method; you can use commas to separate multiple method matches (optional)
# headers - to match request headers; match each child element (the child element name is the header name); you can use the 'regex:' prefix for regex matching (optional)
# handler is the request handler
# type - supported types: static, dynamic_query_handler, predefined_query_handler
# query - use with the predefined_query_handler type; executes the query when the handler is called
# query_param_name - use with the dynamic_query_handler type; extracts and executes the value corresponding to <query_param_name> in the HTTP request params
# status - use with the static type; the response status code
# content_type - use with the static type; the response content-type
# response_content - use with the static type; the response content sent to the client. When using the 'file://' or 'config://' prefix, the content is read from a file or from the configuration and sent to the client.
# http_handlers:
# - rule:
# url: /
# methods: POST,GET
# headers:
# pragma: no-cache
# handler:
# type: dynamic_query_handler
# query_param_name: query
# - rule:
# url: /predefined_query
# methods: POST,GET
# handler:
# type: predefined_query_handler
# query: 'SELECT * FROM system.settings'
# - rule:
# handler:
# type: static
# status: 200
# content_type: 'text/plain; charset=UTF-8'
# response_content: config://http_server_default_response
send_crash_reports:
# Changing <enabled> to true allows sending crash reports to
# the ClickHouse core developers team via Sentry https://sentry.io
# Doing so at least in pre-production environments is highly appreciated
enabled: false
# Change <anonymize> to true if you don't feel comfortable attaching the server hostname to the crash report
anonymize: false
# The default endpoint should be changed to a different Sentry DSN only if you have
# in-house engineers or hired consultants who are going to debug ClickHouse issues for you
endpoint: 'https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277'
# Uncomment to disable ClickHouse internal DNS caching.
# disable_internal_dns_cache: 1
storage_configuration:
disks:
s3:
secret_access_key: REPLACE_ME
access_key_id: 'REPLACE_ME'
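# A fuller S3 disk definition would normally need at least 'type' and 'endpoint' as well;
# a minimal sketch with a placeholder bucket URL:
# s3:
#     type: s3
#     endpoint: 'https://my-bucket.s3.amazonaws.com/clickhouse/'
#     access_key_id: 'REPLACE_ME'
#     secret_access_key: 'REPLACE_ME'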

View File

@ -1,6 +0,0 @@
# Users and ACL.
users:
# If the user name is not specified, the 'default' user is used.
default:
password_sha256_hex: "REPLACE_ME"

View File

@ -1,47 +0,0 @@
include_from: "../include/yaml/user-include.yaml"
# Profiles of settings.
profiles:
# Default settings.
default:
# Maximum memory usage for processing single query, in bytes.
max_memory_usage: 10000000000
load_balancing: random
# Profile that allows only read queries.
readonly:
readonly: 1
# Users and ACL.
users:
# If the user name is not specified, the 'default' user is used.
default:
password: 'REPLACE_ME'
networks:
ip: '::/0'
# Settings profile for user.
profile: default
# Quota for user.
quota: default
# User can create other users and grant rights to them.
# access_management: 1
# Quotas.
quotas:
# Name of quota.
default:
# Limits for a time interval. You can specify many intervals with different limits.
interval:
# Length of interval.
duration: 3600
# No limits. Just calculate resource usage for time interval.
queries: 0
errors: 0
result_rows: 0
read_rows: 0
execution_time: 0

File diff suppressed because it is too large

View File

@ -1,15 +0,0 @@
<clickhouse>
<!-- Profiles of settings. -->
<profiles>
<!-- Default settings. -->
<default>
<!-- Allows us to create replicated databases. -->
<allow_experimental_database_replicated>1</allow_experimental_database_replicated>
</default>
</profiles>
<users>
<default>
<access_management>1</access_management>
</default>
</users>
</clickhouse>

View File

@ -1,8 +0,0 @@
<clickhouse>
<listen_host>::</listen_host>
<listen_host>0.0.0.0</listen_host>
<listen_try>1</listen_try>
<logger>
<console>1</console>
</logger>
</clickhouse>

View File

@ -1,10 +0,0 @@
2021.12.13 10:12:26.940169 [ 38398 ] {} <Warning> Access(local directory): File /var/lib/clickhouse/access/users.list doesn't exist
2021.12.13 10:12:26.940204 [ 38398 ] {} <Warning> Access(local directory): Recovering lists in directory /var/lib/clickhouse/access/
2021.12.13 10:12:40.649453 [ 38445 ] {} <Error> Access(user directories): from: 127.0.0.1, user: default: Authentication failed: Code: 193. DB::Exception: Invalid credentials. (WRONG_PASSWORD), Stack trace (when copying this message, always include the lines below):
0. DB::Exception::Exception(std::__1::basic_string<char, std::__1::char_traits<char>, std::__1::allocator<char> > const&, int, bool) @ 0x9b722d4 in /usr/bin/clickhouse
1. DB::IAccessStorage::throwInvalidCredentials() @ 0x119d9b27 in /usr/bin/clickhouse
2. DB::IAccessStorage::loginImpl(DB::Credentials const&, Poco::Net::IPAddress const&, DB::ExternalAuthenticators const&) const @ 0x119d98d7 in /usr/bin/clickhouse
3. DB::IAccessStorage::login(DB::Credentials const&, Poco::Net::IPAddress const&, DB::ExternalAuthenticators const&, bool) const @ 0x119d9084 in /usr/bin/clickhouse
4. DB::MultipleAccessStorage::loginImpl(DB::Credentials const&, Poco::Net::IPAddress const&, DB::ExternalAuthenticators const&) const @ 0x119ff93c in /usr/bin/clickhouse
5. DB::IAccessStorage::login(DB::Credentials const&, Poco::Net::IPAddress const&, DB::ExternalAuthenticators const&, bool) const @ 0x119d9084 in /usr/bin/clickhouse

View File

@ -1,10 +0,0 @@
2022.02.02 14:49:32.458680 [ 200404 ] {} <Debug> DiskLocal: Reserving 2.47 MiB on disk `default`, having unreserved 1.56 TiB.
2022.02.02 14:49:32.459086 [ 200359 ] {de87df8b-2250-439c-9e87-df8b2250339c::202202_147058_147550_344} <Debug> MergeTask::PrepareStage: Merging 2 parts: from 202202_147058_147549_343 to 202202_147550_147550_0 into Wide
2022.02.02 14:49:32.459201 [ 200359 ] {de87df8b-2250-439c-9e87-df8b2250339c::202202_147058_147550_344} <Debug> MergeTask::PrepareStage: Selected MergeAlgorithm: Horizontal
2022.02.02 14:49:32.459262 [ 200359 ] {de87df8b-2250-439c-9e87-df8b2250339c::202202_147058_147550_344} <Debug> MergeTreeSequentialSource: Reading 159 marks from part 202202_147058_147549_343, total 1289014 rows starting from the beginning of the part
2022.02.02 14:49:32.459614 [ 200359 ] {de87df8b-2250-439c-9e87-df8b2250339c::202202_147058_147550_344} <Debug> MergeTreeSequentialSource: Reading 2 marks from part 202202_147550_147550_0, total 2618 rows starting from the beginning of the part
2022.02.02 14:49:32.507755 [ 200359 ] {de87df8b-2250-439c-9e87-df8b2250339c::202202_147058_147550_344} <Debug> MergeTask::MergeProjectionsStage: Merge sorted 1291632 rows, containing 5 columns (5 merged, 0 gathered) in 0.048711404 sec., 26516008.448452853 rows/sec., 639.52 MiB/sec.
2022.02.02 14:49:32.508332 [ 200359 ] {de87df8b-2250-439c-9e87-df8b2250339c::202202_147058_147550_344} <Trace> system.asynchronous_metric_log (de87df8b-2250-439c-9e87-df8b2250339c): Renaming temporary part tmp_merge_202202_147058_147550_344 to 202202_147058_147550_344.
2022.02.02 14:49:32.508406 [ 200359 ] {de87df8b-2250-439c-9e87-df8b2250339c::202202_147058_147550_344} <Trace> system.asynchronous_metric_log (de87df8b-2250-439c-9e87-df8b2250339c) (MergerMutator): Merged 2 parts: from 202202_147058_147549_343 to 202202_147550_147550_0
2022.02.02 14:49:32.508440 [ 200359 ] {} <Debug> MemoryTracker: Peak memory usage Mutate/Merge: 16.31 MiB.
2022.02.02 14:49:33.000148 [ 200388 ] {} <Trace> AsynchronousMetrics: MemoryTracking: was 774.16 MiB, peak 2.51 GiB, will set to 772.30 MiB (RSS), difference: -1.86 MiB

View File

@ -1 +0,0 @@
dummy hz file for tests