Commit dca3444283: Merge remote-tracking branch 'upstream/master' into named-collections-sql-commands
(mirror of https://github.com/ClickHouse/ClickHouse.git, synced 2024-11-22 23:52:03 +00:00)

.exrc (new file, 1 line)

@@ -0,0 +1 @@
+au BufRead,BufNewFile * set tabstop=4 softtabstop=0 expandtab shiftwidth=4 smarttab tags=tags,../tags

.github/workflows/backport_branches.yml (28 changed lines)

Every "Build" step gets the same two-line change; the hunks start at lines 145, 190, 233, 276, 319, 364 and 409:

      - name: Build
        run: |
-         git -C "$GITHUB_WORKSPACE" submodule sync --recursive
+         git -C "$GITHUB_WORKSPACE" submodule sync
-         git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+         git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"

.github/workflows/master.yml (212 changed lines)

The same submodule change as in backport_branches.yml is applied to every "Build" step, in hunks starting at lines 209, 251, 295, 338, 381, 424, 467, 510, 556, 599, 644, 689, 734, 779, 824, 869 and 914:

-         git -C "$GITHUB_WORKSPACE" submodule sync --recursive
+         git -C "$GITHUB_WORKSPACE" submodule sync
-         git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+         git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10

In addition, hunk @@ -3011,6 +3011,150 @@ adds four new jobs, PerformanceComparisonAarch-0 through PerformanceComparisonAarch-3, immediately before the "SQLANCER FUZZERS" banner. The four jobs are identical except for RUN_BY_HASH_NUM (0, 1, 2, 3); the first one reads:

+  PerformanceComparisonAarch-0:
+    needs: [BuilderDebAarch64]
+    runs-on: [self-hosted, func-tester-aarch64]
+    steps:
+      - name: Set envs
+        run: |
+          cat >> "$GITHUB_ENV" << 'EOF'
+          TEMP_PATH=${{runner.temp}}/performance_comparison
+          REPORTS_PATH=${{runner.temp}}/reports_dir
+          CHECK_NAME=Performance Comparison Aarch64
+          REPO_COPY=${{runner.temp}}/performance_comparison/ClickHouse
+          RUN_BY_HASH_NUM=0
+          RUN_BY_HASH_TOTAL=4
+          EOF
+      - name: Download json reports
+        uses: actions/download-artifact@v2
+        with:
+          path: ${{ env.REPORTS_PATH }}
+      - name: Clear repository
+        run: |
+          sudo rm -fr "$GITHUB_WORKSPACE" && mkdir "$GITHUB_WORKSPACE"
+      - name: Check out repository code
+        uses: actions/checkout@v2
+      - name: Performance Comparison
+        run: |
+          sudo rm -fr "$TEMP_PATH"
+          mkdir -p "$TEMP_PATH"
+          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"
+          cd "$REPO_COPY/tests/ci"
+          python3 performance_comparison_check.py "$CHECK_NAME"
+      - name: Cleanup
+        if: always()
+        run: |
+          docker ps --quiet | xargs --no-run-if-empty docker kill ||:
+          docker ps --all --quiet | xargs --no-run-if-empty docker rm -f ||:
+          sudo rm -fr "$TEMP_PATH"

.github/workflows/nightly.yml (2 changed lines)

@@ -105,7 +105,7 @@ jobs:
      - name: Build
        run: |
          git -C "$GITHUB_WORKSPACE" submodule sync
-         git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --init --jobs=10
+         git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10
          sudo rm -fr "$TEMP_PATH"
          mkdir -p "$TEMP_PATH"
          cp -r "$GITHUB_WORKSPACE" "$TEMP_PATH"

.github/workflows/pull_request.yml (73 changed lines)

@@ -22,6 +22,8 @@ on: # yamllint disable-line rule:truthy
  jobs:
    CheckLabels:
      runs-on: [self-hosted, style-checker]
+     # Run the first check always, even if the CI is cancelled
+     if: ${{ always() }}
      steps:
        - name: Clear repository
          run: |

@@ -112,7 +114,8 @@ jobs:
    StyleCheck:
      needs: DockerHubPush
      runs-on: [self-hosted, style-checker]
-     if: ${{ success() || failure() || always() }}
+     # We need additional `&& ! cancelled()` to have the job being able to cancel
+     if: ${{ success() || failure() || ( always() && ! cancelled() ) }}
      steps:
        - name: Set envs
          run: |

The remaining hunks, starting at (old) lines 272, 315, 360, 403, 446, 489, 532, 575, 621, 664, 707, 750, 793, 836, 879, 922 and 965, apply the same submodule change to every "Build" step:

-         git -C "$GITHUB_WORKSPACE" submodule sync --recursive
+         git -C "$GITHUB_WORKSPACE" submodule sync
-         git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+         git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10

.github/workflows/release_branches.yml (36 changed lines)

The same submodule change is applied to every "Build" step, in hunks starting at lines 136, 178, 220, 263, 306, 349, 392, 437 and 482:

-         git -C "$GITHUB_WORKSPACE" submodule sync --recursive
+         git -C "$GITHUB_WORKSPACE" submodule sync
-         git -C "$GITHUB_WORKSPACE" submodule update --depth=1 --recursive --init --jobs=10
+         git -C "$GITHUB_WORKSPACE" submodule update --single-branch --depth=1 --init --jobs=10

.gitmodules (9 changed lines)

@@ -65,12 +65,6 @@
  [submodule "contrib/libgsasl"]
      path = contrib/libgsasl
      url = https://github.com/ClickHouse/libgsasl.git
- [submodule "contrib/libcxx"]
-     path = contrib/libcxx
-     url = https://github.com/ClickHouse/libcxx.git
- [submodule "contrib/libcxxabi"]
-     path = contrib/libcxxabi
-     url = https://github.com/ClickHouse/libcxxabi.git
  [submodule "contrib/snappy"]
      path = contrib/snappy
      url = https://github.com/ClickHouse/snappy.git

@@ -290,3 +284,6 @@
  [submodule "contrib/morton-nd"]
      path = contrib/morton-nd
      url = https://github.com/morton-nd/morton-nd
+ [submodule "contrib/xxHash"]
+     path = contrib/xxHash
+     url = https://github.com/Cyan4973/xxHash.git

.vimrc (2 lines removed)

@@ -1,2 +0,0 @@
-au BufRead,BufNewFile ./* set tabstop=4 softtabstop=0 expandtab shiftwidth=4 smarttab tags=tags,../tags
-

README "Upcoming events" section (file path not shown in this diff listing)

@@ -17,5 +17,7 @@ ClickHouse® is an open-source column-oriented database management system that a

  ## Upcoming events
  * [**v22.11 Release Webinar**](https://clickhouse.com/company/events/v22-11-release-webinar) Original creator, co-founder, and CTO of ClickHouse Alexey Milovidov will walk us through the highlights of the release, provide live demos, and share vision into what is coming in the roadmap.
- * [**ClickHouse Meetup at the Deutsche Bank office in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/289311596/) Hear from Deutsche Bank on why they chose ClickHouse for big sensitive data in a regulated environment. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management.
- * [**AWS re:Invent**](https://clickhouse.com/company/events/aws-reinvent) Core members of the ClickHouse team -- including 2 of our founders -- will be at re:Invent from November 29 to December 3. We are available on the show floor, but are also determining interest in holding an event during the time there.
+ * [**ClickHouse Meetup at the RELEX Solutions office in Stockholm**](https://www.meetup.com/clickhouse-stockholm-user-group/events/289492084/) - Dec 1 - Formulate by RELEX is a Swedish promotion planning and analytics company. They will share why they chose ClickHouse for their real time analytics and forecasting solution. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management.
+ * [**ClickHouse Meetup at the Deutsche Bank office in Berlin**](https://www.meetup.com/clickhouse-berlin-user-group/events/289311596/) - Dec 5 - Hear from Deutsche Bank on why they chose ClickHouse for big sensitive data in a regulated environment. The ClickHouse team will then present how ClickHouse is used for real time financial data analytics, including tick data, trade analytics and risk management.
+ * [**ClickHouse Meetup at the Rokt offices in Manhattan**](https://www.meetup.com/clickhouse-new-york-user-group/events/289403909/) - Dec 6 - We are very excited to be holding our next in-person ClickHouse meetup at the Rokt offices in Manhattan. Featuring talks from Bloomberg, Disney Streaming, Prequel, Rokt, and ClickHouse

bit_cast helper (file path not shown in this diff listing)

@@ -12,7 +12,21 @@
  template <typename To, typename From>
  std::decay_t<To> bit_cast(const From & from)
  {
+     /**
+       * Assume the source value is 0xAABBCCDD (i.e. sizeof(from) == 4).
+       * Its BE representation is 0xAABBCCDD, the LE representation is 0xDDCCBBAA.
+       * Further assume, sizeof(res) == 8 and that res is initially zeroed out.
+       * With LE, the result after bit_cast will be 0xDDCCBBAA00000000 --> input value == output value.
+       * With BE, the result after bit_cast will be 0x00000000AABBCCDD --> input value == output value.
+       */
      To res {};
-     memcpy(static_cast<void*>(&res), &from, std::min(sizeof(res), sizeof(from)));
+     if constexpr (std::endian::native == std::endian::little)
+         memcpy(static_cast<void*>(&res), &from, std::min(sizeof(res), sizeof(from)));
+     else
+     {
+         uint32_t offset_to = (sizeof(res) > sizeof(from)) ? (sizeof(res) - sizeof(from)) : 0;
+         uint32_t offset_from = (sizeof(from) > sizeof(res)) ? (sizeof(from) - sizeof(res)) : 0;
+         memcpy(reinterpret_cast<char *>(&res) + offset_to, reinterpret_cast<const char *>(&from) + offset_from, std::min(sizeof(res), sizeof(from)));
+     }
      return res;
  }
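
The comment in the hunk above explains why the big-endian branch copies into the least significant bytes. As a sanity check, here is a minimal standalone sketch (not part of the commit) that mirrors the patched helper; it assumes a C++20 compiler for std::endian, and the main() is only illustrative:

    // Widening 0xAABBCCDD to 64 bits should print 00000000aabbccdd on both
    // little- and big-endian hosts with the patched copy logic.
    #include <algorithm>
    #include <bit>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <type_traits>

    template <typename To, typename From>
    std::decay_t<To> bit_cast(const From & from)
    {
        To res {};
        if constexpr (std::endian::native == std::endian::little)
            memcpy(static_cast<void*>(&res), &from, std::min(sizeof(res), sizeof(from)));
        else
        {
            // On big-endian, align the copy with the least significant bytes of both values.
            uint32_t offset_to = (sizeof(res) > sizeof(from)) ? (sizeof(res) - sizeof(from)) : 0;
            uint32_t offset_from = (sizeof(from) > sizeof(res)) ? (sizeof(from) - sizeof(res)) : 0;
            memcpy(reinterpret_cast<char *>(&res) + offset_to,
                   reinterpret_cast<const char *>(&from) + offset_from,
                   std::min(sizeof(res), sizeof(from)));
        }
        return res;
    }

    int main()
    {
        uint32_t from = 0xAABBCCDD;
        auto to = bit_cast<uint64_t>(from);
        printf("%016llx\n", static_cast<unsigned long long>(to));
    }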

contrib/CMakeLists.txt (4 changed lines)

@@ -167,7 +167,9 @@ add_contrib (c-ares-cmake c-ares)
  add_contrib (qpl-cmake qpl)
  add_contrib (morton-nd-cmake morton-nd)

- add_contrib(annoy-cmake annoy)
+ add_contrib (annoy-cmake annoy)
+
+ add_contrib (xxHash-cmake xxHash)

  # Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
  # Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear

contrib/libcxx (submodule removed)

@@ -1 +0,0 @@
-Subproject commit 4db7f838afd3139eb3761694b04d31275df45d2d

libcxx CMake wrapper (file path not shown in this diff listing)

@@ -1,6 +1,6 @@
  include(CheckCXXCompilerFlag)

- set(LIBCXX_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxx")
+ set(LIBCXX_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/libcxx")

  set(SRCS
      "${LIBCXX_SOURCE_DIR}/src/algorithm.cpp"

contrib/libcxxabi (submodule removed)

@@ -1 +0,0 @@
-Subproject commit a736a6b3c6a7b8aae2ebad629ca21b2c55b4820e

libcxxabi CMake wrapper (file path not shown in this diff listing)

@@ -1,4 +1,4 @@
- set(LIBCXXABI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/libcxxabi")
+ set(LIBCXXABI_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/llvm-project/libcxxabi")

  set(SRCS
      "${LIBCXXABI_SOURCE_DIR}/src/abort_message.cpp"

contrib/llvm-project (submodule bumped)

@@ -1 +1 @@
-Subproject commit 3a39038345a400e7e767811b142a94355d511215
+Subproject commit e61a81aa6fc529b469e2a54b7ce788606e138b5d

contrib/poco (submodule bumped)

@@ -1 +1 @@
-Subproject commit 76746b35d0e254eaaba71dc3b79e46cba8cbb144
+Subproject commit 799234226187c0ae0b8c90f23465b25ed7956e56

contrib/xxHash (new submodule)

@@ -0,0 +1 @@
+Subproject commit 3078dc6039f8c0bffcb1904f81cfe6b2c3209435

contrib/xxHash-cmake/CMakeLists.txt (new file, 13 lines)

@@ -0,0 +1,13 @@
+set (LIBRARY_DIR "${ClickHouse_SOURCE_DIR}/contrib/xxHash")
+set (SRCS
+    "${LIBRARY_DIR}/xxhash.c"
+)
+
+add_library(xxHash ${SRCS})
+target_include_directories(xxHash SYSTEM BEFORE INTERFACE "${LIBRARY_DIR}")
+
+# XXH_INLINE_ALL - Make all functions inline, with implementations being directly included within xxhash.h. Inlining functions is beneficial for speed on small keys.
+# https://github.com/Cyan4973/xxHash/tree/v0.8.1#build-modifiers
+target_compile_definitions(xxHash PUBLIC XXH_INLINE_ALL)
+
+add_library(ch_contrib::xxHash ALIAS xxHash)
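
The XXH_INLINE_ALL comment above is why consumers only need the header: with that definition, xxhash.h carries its implementations as inline functions. A hypothetical consumer sketch (not part of the commit; the hashed string and seed are made up) could look like this, assuming the contrib/xxHash directory is on the include path:

    // Hypothetical consumer of the new ch_contrib::xxHash target. In the CMake
    // build the PUBLIC XXH_INLINE_ALL definition comes from the target itself;
    // the guard below only matters when compiling this sketch standalone.
    #ifndef XXH_INLINE_ALL
    #define XXH_INLINE_ALL
    #endif
    #include "xxhash.h"

    #include <cstdio>
    #include <cstring>

    int main()
    {
        const char * data = "ClickHouse";
        // XXH64(input, length, seed) returns a 64-bit hash (XXH64_hash_t).
        XXH64_hash_t h = XXH64(data, strlen(data), 0);
        printf("%016llx\n", static_cast<unsigned long long>(h));
    }

Inside the tree, a source file would normally just link ch_contrib::xxHash and pick up both the include path and the define from the target.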

Docker image based on clickhouse/test-util (file path not shown in this diff listing)

@@ -6,29 +6,24 @@ FROM clickhouse/test-util:$FROM_TAG
  # Rust toolchain and libraries
  ENV RUSTUP_HOME=/rust/rustup
  ENV CARGO_HOME=/rust/cargo
- RUN curl https://sh.rustup.rs -sSf | bash -s -- -y
- RUN chmod 777 -R /rust
  ENV PATH="/rust/cargo/env:${PATH}"
  ENV PATH="/rust/cargo/bin:${PATH}"
- RUN rustup target add aarch64-unknown-linux-gnu && \
-     rustup target add x86_64-apple-darwin && \
-     rustup target add x86_64-unknown-freebsd && \
-     rustup target add aarch64-apple-darwin && \
-     rustup target add powerpc64le-unknown-linux-gnu
- RUN apt-get install \
+ RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \
+     chmod 777 -R /rust && \
+     rustup target add aarch64-unknown-linux-gnu && \
+     rustup target add x86_64-apple-darwin && \
+     rustup target add x86_64-unknown-freebsd && \
+     rustup target add aarch64-apple-darwin && \
+     rustup target add powerpc64le-unknown-linux-gnu
+
+ RUN apt-get update && \
+     apt-get install --yes \
      gcc-aarch64-linux-gnu \
      build-essential \
      libc6 \
      libc6-dev \
-     libc6-dev-arm64-cross \
-     --yes
-
- # Install CMake 3.20+ for Rust compilation
- # Used https://askubuntu.com/a/1157132 as reference
- RUN apt purge cmake --yes
- RUN wget -O - https://apt.kitware.com/keys/kitware-archive-latest.asc 2>/dev/null | gpg --dearmor - | tee /etc/apt/trusted.gpg.d/kitware.gpg >/dev/null
- RUN apt-add-repository 'deb https://apt.kitware.com/ubuntu/ focal main'
- RUN apt update && apt install cmake --yes
+     libc6-dev-arm64-cross && \
+     apt-get clean

  ENV CC=clang-${LLVM_VERSION}
  ENV CXX=clang++-${LLVM_VERSION}

CI script with function clone_submodules (file path not shown in this diff listing)

@@ -117,8 +117,7 @@ function clone_submodules
      contrib/cctz
      contrib/libcpuid
      contrib/double-conversion
-     contrib/libcxx
-     contrib/libcxxabi
+     contrib/llvm-project
      contrib/lz4
      contrib/zstd
      contrib/fastops

@@ -137,6 +136,7 @@ function clone_submodules
      contrib/hashidsxx
      contrib/c-ares
      contrib/morton-nd
+     contrib/xxHash
  )

  git submodule sync

Fuzzer image Dockerfile (file path not shown in this diff listing)

@@ -38,7 +38,7 @@ COPY * /
  SHELL ["/bin/bash", "-c"]
  CMD set -o pipefail \
      && cd /workspace \
-     && /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log
+     && timeout -s 9 1h /run-fuzzer.sh 2>&1 | ts "$(printf '%%Y-%%m-%%d %%H:%%M:%%S\t')" | tee main.log

  # docker run --network=host --volume <workspace>:/workspace -e PR_TO_TEST=<> -e SHA_TO_TEST=<> clickhouse/fuzzer
@ -1,5 +1,5 @@
|
|||||||
#!/bin/bash
|
#!/bin/bash
|
||||||
# shellcheck disable=SC2086,SC2001,SC2046,SC2030,SC2031
|
# shellcheck disable=SC2086,SC2001,SC2046,SC2030,SC2031,SC2010,SC2015
|
||||||
|
|
||||||
set -x
|
set -x
|
||||||
|
|
||||||
@ -10,11 +10,6 @@ set -e
|
|||||||
set -u
|
set -u
|
||||||
set -o pipefail
|
set -o pipefail
|
||||||
|
|
||||||
trap "exit" INT TERM
|
|
||||||
# The watchdog is in the separate process group, so we have to kill it separately
|
|
||||||
# if the script terminates earlier.
|
|
||||||
trap 'kill $(jobs -pr) ${watchdog_pid:-} ||:' EXIT
|
|
||||||
|
|
||||||
stage=${stage:-}
|
stage=${stage:-}
|
||||||
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
|
||||||
echo "$script_dir"
|
echo "$script_dir"
|
||||||
@ -110,26 +105,6 @@ function configure
|
|||||||
EOL
|
EOL
|
||||||
}
|
}
|
||||||
|
|
||||||
function watchdog
|
|
||||||
{
|
|
||||||
sleep 1800
|
|
||||||
|
|
||||||
echo "Fuzzing run has timed out"
|
|
||||||
for _ in {1..10}
|
|
||||||
do
|
|
||||||
# Only kill by pid the particular client that runs the fuzzing, or else
|
|
||||||
# we can kill some clickhouse-client processes this script starts later,
|
|
||||||
# e.g. for checking server liveness.
|
|
||||||
if ! kill $fuzzer_pid
|
|
||||||
then
|
|
||||||
break
|
|
||||||
fi
|
|
||||||
sleep 1
|
|
||||||
done
|
|
||||||
|
|
||||||
kill -9 -- $fuzzer_pid ||:
|
|
||||||
}
|
|
||||||
|
|
||||||
function filter_exists_and_template
|
function filter_exists_and_template
|
||||||
{
|
{
|
||||||
local path
|
local path
|
||||||
@ -175,8 +150,6 @@ function fuzz
|
|||||||
|
|
||||||
mkdir -p /var/run/clickhouse-server
|
mkdir -p /var/run/clickhouse-server
|
||||||
|
|
||||||
# interferes with gdb
|
|
||||||
export CLICKHOUSE_WATCHDOG_ENABLE=0
|
|
||||||
# NOTE: we use process substitution here to keep $! as the pid of clickhouse-server
|
# NOTE: we use process substitution here to keep $! as the pid of clickhouse-server
|
||||||
clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>&1 | pigz > server.log.gz &
|
clickhouse-server --config-file db/config.xml --pid-file /var/run/clickhouse-server/clickhouse-server.pid -- --path db 2>&1 | pigz > server.log.gz &
|
||||||
server_pid=$!
|
server_pid=$!
|
||||||
@ -214,7 +187,7 @@ detach
|
|||||||
quit
|
quit
|
||||||
" > script.gdb
|
" > script.gdb
|
||||||
|
|
||||||
gdb -batch -command script.gdb -p $server_pid &
|
gdb -batch -command script.gdb -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" &
|
||||||
sleep 5
|
sleep 5
|
||||||
# gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
|
# gdb will send SIGSTOP, spend some time loading debug info and then send SIGCONT, wait for it (up to send_timeout, 300s)
|
||||||
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
|
time clickhouse-client --query "SELECT 'Connected to clickhouse-server after attaching gdb'" ||:
|
||||||
@ -236,7 +209,7 @@ quit
|
|||||||
# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
|
# SC2012: Use find instead of ls to better handle non-alphanumeric filenames. They are all alphanumeric.
|
||||||
# SC2046: Quote this to prevent word splitting. Actually I need word splitting.
|
# SC2046: Quote this to prevent word splitting. Actually I need word splitting.
|
||||||
# shellcheck disable=SC2012,SC2046
|
# shellcheck disable=SC2012,SC2046
|
||||||
clickhouse-client \
|
timeout -s TERM --preserve-status 30m clickhouse-client \
|
||||||
--receive_timeout=10 \
|
--receive_timeout=10 \
|
||||||
--receive_data_timeout_ms=10000 \
|
--receive_data_timeout_ms=10000 \
|
||||||
--stacktrace \
|
--stacktrace \
|
||||||
@ -249,16 +222,6 @@ quit
|
|||||||
fuzzer_pid=$!
|
fuzzer_pid=$!
|
||||||
echo "Fuzzer pid is $fuzzer_pid"
|
echo "Fuzzer pid is $fuzzer_pid"
|
||||||
|
|
||||||
# Start a watchdog that should kill the fuzzer on timeout.
|
|
||||||
# The shell won't kill the child sleep when we kill it, so we have to put it
|
|
||||||
# into a separate process group so that we can kill them all.
|
|
||||||
set -m
|
|
||||||
watchdog &
|
|
||||||
watchdog_pid=$!
|
|
||||||
set +m
|
|
||||||
# Check that the watchdog has started.
|
|
||||||
kill -0 $watchdog_pid
|
|
||||||
|
|
||||||
# Wait for the fuzzer to complete.
|
# Wait for the fuzzer to complete.
|
||||||
# Note that the 'wait || ...' thing is required so that the script doesn't
|
# Note that the 'wait || ...' thing is required so that the script doesn't
|
||||||
# exit because of 'set -e' when 'wait' returns nonzero code.
|
# exit because of 'set -e' when 'wait' returns nonzero code.
|
||||||
@ -266,8 +229,6 @@ quit
|
|||||||
wait "$fuzzer_pid" || fuzzer_exit_code=$?
|
wait "$fuzzer_pid" || fuzzer_exit_code=$?
|
||||||
echo "Fuzzer exit code is $fuzzer_exit_code"
|
echo "Fuzzer exit code is $fuzzer_exit_code"
|
||||||
|
|
||||||
kill -- -$watchdog_pid ||:
|
|
||||||
|
|
||||||
# If the server dies, most often the fuzzer returns code 210: connection
|
# If the server dies, most often the fuzzer returns code 210: connection
|
||||||
# refused, and sometimes also code 32: attempt to read after eof. For
|
# refused, and sometimes also code 32: attempt to read after eof. For
|
||||||
# simplicity, check again whether the server is accepting connections, using
|
# simplicity, check again whether the server is accepting connections, using
|
||||||
@ -333,6 +294,8 @@ quit
|
|||||||
pigz core.*
|
pigz core.*
|
||||||
mv core.*.gz core.gz
|
mv core.*.gz core.gz
|
||||||
fi
|
fi
|
||||||
|
|
||||||
|
dmesg -T | grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' && echo "OOM in dmesg" ||:
|
||||||
}
|
}
|
||||||
|
|
||||||
case "$stage" in
|
case "$stage" in
|
||||||
|
@ -295,6 +295,9 @@ if not args.use_existing_tables:
|
|||||||
|
|
||||||
reportStageEnd("create")
|
reportStageEnd("create")
|
||||||
|
|
||||||
|
# Let's sync the data to avoid writeback affects performance
|
||||||
|
os.system("sync")
|
||||||
|
|
||||||
# By default, test all queries.
|
# By default, test all queries.
|
||||||
queries_to_run = range(0, len(test_queries))
|
queries_to_run = range(0, len(test_queries))
|
||||||
|
|
||||||
|
@ -131,7 +131,14 @@ function stop()
|
|||||||
# Preserve the pid, since the server can hang after the PID file is deleted.
|
# Preserve the pid, since the server can hang after the PID file is deleted.
|
||||||
pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"
|
pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"
|
||||||
|
|
||||||
clickhouse stop --do-not-kill && return
|
# --max-tries is supported only since 22.12
|
||||||
|
if dpkg --compare-versions "$(clickhouse local -q 'select version()')" ge "22.12"; then
|
||||||
|
# Increase default waiting timeout for sanitizers and debug builds
|
||||||
|
clickhouse stop --max-tries 180 --do-not-kill && return
|
||||||
|
else
|
||||||
|
clickhouse stop --do-not-kill && return
|
||||||
|
fi
|
||||||
|
|
||||||
# We failed to stop the server with SIGTERM. Maybe it hung; let's collect stacktraces.
|
# We failed to stop the server with SIGTERM. Maybe it hung; let's collect stacktraces.
|
||||||
kill -TERM "$(pidof gdb)" ||:
|
kill -TERM "$(pidof gdb)" ||:
|
||||||
sleep 5
|
sleep 5
|
||||||
@ -388,6 +395,8 @@ else
|
|||||||
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
|
rm -f /etc/clickhouse-server/config.d/storage_conf.xml ||:
|
||||||
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:
|
rm -f /etc/clickhouse-server/config.d/azure_storage_conf.xml ||:
|
||||||
|
|
||||||
|
# Turn on after 22.12
|
||||||
|
rm -f /etc/clickhouse-server/config.d/compressed_marks_and_index.xml ||:
|
||||||
# it uses recently introduced settings which previous versions may not have
|
# it uses recently introduced settings which previous versions may not have
|
||||||
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:
|
rm -f /etc/clickhouse-server/users.d/insert_keeper_retries.xml ||:
|
||||||
|
|
||||||
@ -456,7 +465,7 @@ else
|
|||||||
zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
|
zgrep -Fav -e "Code: 236. DB::Exception: Cancelled merging parts" \
|
||||||
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
|
-e "Code: 236. DB::Exception: Cancelled mutating parts" \
|
||||||
-e "REPLICA_IS_ALREADY_ACTIVE" \
|
-e "REPLICA_IS_ALREADY_ACTIVE" \
|
||||||
-e "REPLICA_IS_ALREADY_EXIST" \
|
-e "REPLICA_ALREADY_EXISTS" \
|
||||||
-e "ALL_REPLICAS_LOST" \
|
-e "ALL_REPLICAS_LOST" \
|
||||||
-e "DDLWorker: Cannot parse DDL task query" \
|
-e "DDLWorker: Cannot parse DDL task query" \
|
||||||
-e "RaftInstance: failed to accept a rpc connection due to error 125" \
|
-e "RaftInstance: failed to accept a rpc connection due to error 125" \
|
||||||
@ -487,6 +496,7 @@ else
|
|||||||
-e "Code: 269. DB::Exception: Destination table is myself" \
|
-e "Code: 269. DB::Exception: Destination table is myself" \
|
||||||
-e "Coordination::Exception: Connection loss" \
|
-e "Coordination::Exception: Connection loss" \
|
||||||
-e "MutateFromLogEntryTask" \
|
-e "MutateFromLogEntryTask" \
|
||||||
|
-e "No connection to ZooKeeper, cannot get shared table ID" \
|
||||||
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
/var/log/clickhouse-server/clickhouse-server.backward.clean.log | zgrep -Fa "<Error>" > /test_output/bc_check_error_messages.txt \
|
||||||
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
&& echo -e 'Backward compatibility check: Error message in clickhouse-server.log (see bc_check_error_messages.txt)\tFAIL' >> /test_output/test_results.tsv \
|
||||||
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
|| echo -e 'Backward compatibility check: No Error messages in clickhouse-server.log\tOK' >> /test_output/test_results.tsv
|
||||||
|
@ -17,7 +17,7 @@ RUN apt-get update && env DEBIAN_FRONTEND=noninteractive apt-get install --yes \
|
|||||||
python3-pip \
|
python3-pip \
|
||||||
shellcheck \
|
shellcheck \
|
||||||
yamllint \
|
yamllint \
|
||||||
&& pip3 install black==22.8.0 boto3 codespell==2.2.1 dohq-artifactory PyGithub unidiff pylint==2.6.2 \
|
&& pip3 install black==22.8.0 boto3 codespell==2.2.1 dohq-artifactory mypy PyGithub unidiff pylint==2.6.2 \
|
||||||
&& apt-get clean \
|
&& apt-get clean \
|
||||||
&& rm -rf /root/.cache/pip
|
&& rm -rf /root/.cache/pip
|
||||||
|
|
||||||
|
@ -11,17 +11,19 @@ def process_result(result_folder):
|
|||||||
description = ""
|
description = ""
|
||||||
test_results = []
|
test_results = []
|
||||||
checks = (
|
checks = (
|
||||||
("header duplicates", "duplicate_output.txt"),
|
"duplicate includes",
|
||||||
("shellcheck", "shellcheck_output.txt"),
|
"shellcheck",
|
||||||
("style", "style_output.txt"),
|
"style",
|
||||||
("black", "black_output.txt"),
|
"black",
|
||||||
("typos", "typos_output.txt"),
|
"mypy",
|
||||||
("whitespaces", "whitespaces_output.txt"),
|
"typos",
|
||||||
("workflows", "workflows_output.txt"),
|
"whitespaces",
|
||||||
("doc typos", "doc_spell_output.txt"),
|
"workflows",
|
||||||
|
"docs spelling",
|
||||||
)
|
)
|
||||||
|
|
||||||
for name, out_file in checks:
|
for name in checks:
|
||||||
|
out_file = name.replace(" ", "_") + "_output.txt"
|
||||||
full_path = os.path.join(result_folder, out_file)
|
full_path = os.path.join(result_folder, out_file)
|
||||||
if not os.path.exists(full_path):
|
if not os.path.exists(full_path):
|
||||||
logging.info("No %s check log on path %s", name, full_path)
|
logging.info("No %s check log on path %s", name, full_path)
|
||||||
|
@ -4,15 +4,17 @@
|
|||||||
|
|
||||||
cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_output/check_status.tsv
|
cd /ClickHouse/utils/check-style || echo -e "failure\tRepo not found" > /test_output/check_status.tsv
|
||||||
echo "Check duplicates" | ts
|
echo "Check duplicates" | ts
|
||||||
./check-duplicate-includes.sh |& tee /test_output/duplicate_output.txt
|
./check-duplicate-includes.sh |& tee /test_output/duplicate_includes_output.txt
|
||||||
echo "Check style" | ts
|
echo "Check style" | ts
|
||||||
./check-style -n |& tee /test_output/style_output.txt
|
./check-style -n |& tee /test_output/style_output.txt
|
||||||
echo "Check python formatting with black" | ts
|
echo "Check python formatting with black" | ts
|
||||||
./check-black -n |& tee /test_output/black_output.txt
|
./check-black -n |& tee /test_output/black_output.txt
|
||||||
|
echo "Check python type hinting with mypy" | ts
|
||||||
|
./check-mypy -n |& tee /test_output/mypy_output.txt
|
||||||
echo "Check typos" | ts
|
echo "Check typos" | ts
|
||||||
./check-typos |& tee /test_output/typos_output.txt
|
./check-typos |& tee /test_output/typos_output.txt
|
||||||
echo "Check docs spelling" | ts
|
echo "Check docs spelling" | ts
|
||||||
./check-doc-aspell |& tee /test_output/doc_spell_output.txt
|
./check-doc-aspell |& tee /test_output/docs_spelling_output.txt
|
||||||
echo "Check whitespaces" | ts
|
echo "Check whitespaces" | ts
|
||||||
./check-whitespaces -n |& tee /test_output/whitespaces_output.txt
|
./check-whitespaces -n |& tee /test_output/whitespaces_output.txt
|
||||||
echo "Check workflows" | ts
|
echo "Check workflows" | ts
|
||||||
|
@ -13,6 +13,7 @@ RUN apt-get update \
|
|||||||
apt-transport-https \
|
apt-transport-https \
|
||||||
apt-utils \
|
apt-utils \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
|
curl \
|
||||||
dnsutils \
|
dnsutils \
|
||||||
gnupg \
|
gnupg \
|
||||||
iputils-ping \
|
iputils-ping \
|
||||||
@ -24,10 +25,16 @@ RUN apt-get update \
|
|||||||
&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
|
&& echo "${LLVM_PUBKEY_HASH} /tmp/llvm-snapshot.gpg.key" | sha384sum -c \
|
||||||
&& apt-key add /tmp/llvm-snapshot.gpg.key \
|
&& apt-key add /tmp/llvm-snapshot.gpg.key \
|
||||||
&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
|
&& export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
|
||||||
&& echo "deb [trusted=yes] https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
|
&& echo "deb https://apt.llvm.org/${CODENAME}/ llvm-toolchain-${CODENAME}-${LLVM_VERSION} main" >> \
|
||||||
/etc/apt/sources.list \
|
/etc/apt/sources.list \
|
||||||
&& apt-get clean
|
&& apt-get clean
|
||||||
|
|
||||||
|
# Install cmake 3.20+ for rust support
|
||||||
|
# Used https://askubuntu.com/a/1157132 as reference
|
||||||
|
RUN curl -s https://apt.kitware.com/keys/kitware-archive-latest.asc | \
|
||||||
|
gpg --dearmor - > /etc/apt/trusted.gpg.d/kitware.gpg && \
|
||||||
|
echo "deb https://apt.kitware.com/ubuntu/ $(lsb_release -cs) main" >> /etc/apt/sources.list
|
||||||
|
|
||||||
# initial packages
|
# initial packages
|
||||||
RUN apt-get update \
|
RUN apt-get update \
|
||||||
&& apt-get install \
|
&& apt-get install \
|
||||||
@ -37,7 +44,6 @@ RUN apt-get update \
|
|||||||
clang-${LLVM_VERSION} \
|
clang-${LLVM_VERSION} \
|
||||||
clang-tidy-${LLVM_VERSION} \
|
clang-tidy-${LLVM_VERSION} \
|
||||||
cmake \
|
cmake \
|
||||||
curl \
|
|
||||||
fakeroot \
|
fakeroot \
|
||||||
gdb \
|
gdb \
|
||||||
git \
|
git \
|
||||||
|
@ -2,13 +2,20 @@
|
|||||||
|
|
||||||
The main task of these indexes is to quickly find the nearest neighbors for multidimensional data. An example of such a problem is finding similar pictures (or texts) for a given picture (text). That problem can be reduced to finding the nearest [embeddings](https://cloud.google.com/architecture/overview-extracting-and-serving-feature-embeddings-for-machine-learning). The embeddings can be created from data using a [UDF](../../../sql-reference/functions/index.md#executable-user-defined-functions).
|
The main task of these indexes is to quickly find the nearest neighbors for multidimensional data. An example of such a problem is finding similar pictures (or texts) for a given picture (text). That problem can be reduced to finding the nearest [embeddings](https://cloud.google.com/architecture/overview-extracting-and-serving-feature-embeddings-for-machine-learning). The embeddings can be created from data using a [UDF](../../../sql-reference/functions/index.md#executable-user-defined-functions).
|
||||||
|
|
||||||
The next query finds the closest neighbors in N-dimensional space using the L2 (Euclidean) distance:
|
The next queries find the closest neighbors in N-dimensional space using the L2 (Euclidean) distance:
|
||||||
``` sql
|
``` sql
|
||||||
SELECT *
|
SELECT *
|
||||||
FROM table_name
|
FROM table_name
|
||||||
WHERE L2Distance(Column, Point) < MaxDistance
|
WHERE L2Distance(Column, Point) < MaxDistance
|
||||||
LIMIT N
|
LIMIT N
|
||||||
```
|
```
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
SELECT *
|
||||||
|
FROM table_name
|
||||||
|
ORDER BY L2Distance(Column, Point)
|
||||||
|
LIMIT N
|
||||||
|
```
|
||||||
But such a query takes a long time to execute, because the distance between `TargetEmbedding` and every other vector has to be calculated. This is where ANN indexes can help. They store a compact approximation of the search space (e.g. using clustering, search trees, etc.) and are able to compute approximate neighbors quickly.
|
But such a query takes a long time to execute, because the distance between `TargetEmbedding` and every other vector has to be calculated. This is where ANN indexes can help. They store a compact approximation of the search space (e.g. using clustering, search trees, etc.) and are able to compute approximate neighbors quickly.
|
||||||
|
|
||||||
## Indexes Structure
|
## Indexes Structure
|
||||||
@ -34,26 +41,27 @@ Approximate Nearest Neighbor Search Indexes (`ANNIndexes`) are similar to skip i
|
|||||||
|
|
||||||
In these queries, `DistanceFunction` is selected from [distance functions](../../../sql-reference/functions/distance-functions). `Point` is a known vector (something like `(0.1, 0.1, ... )`). To avoid writing large vectors, use [client parameters](../../../interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters). `Value` is a float value that bounds the neighbourhood.
|
In these queries, `DistanceFunction` is selected from [distance functions](../../../sql-reference/functions/distance-functions). `Point` is a known vector (something like `(0.1, 0.1, ... )`). To avoid writing large vectors, use [client parameters](../../../interfaces/cli.md#queries-with-parameters-cli-queries-with-parameters). `Value` is a float value that bounds the neighbourhood.
|
||||||
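To illustrate the client-parameter approach mentioned above, here is a minimal sketch; the table name `table_name`, the column `data` and the parameter name `target` are placeholders and not taken from the page itself:

```sql
-- Pass the query vector as a parameter instead of inlining it, e.g.:
--   clickhouse-client --param_target='[0.1, 0.1, 0.2]' --query "..."
SELECT *
FROM table_name
ORDER BY L2Distance(data, {target:Array(Float32)})
LIMIT 10
```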
|
|
||||||
!!! note "Note"
|
:::note
|
||||||
ANN index can't speed up query that satisfies both types(`where + order by`, only one of them). All queries must have the limit, as algorithms are used to find nearest neighbors and need a specific number of them.
|
An ANN index can't speed up a query that uses both types (`where + order by`); only one of them is supported. All queries must have a limit, as the algorithms are used to find nearest neighbors and need a specific number of them.
|
||||||
|
:::
|
||||||
|
|
||||||
!!! note "Note"
|
:::note
|
||||||
Indexes are applied only to queries with a limit less than the `max_limit_for_ann_queries` setting. This helps to avoid memory overflows in queries with a large limit. `max_limit_for_ann_queries` setting can be changed if you know you can provide enough memory. The default value is `1000000`.
|
Indexes are applied only to queries with a limit less than the `max_limit_for_ann_queries` setting. This helps to avoid memory overflows in queries with a large limit. `max_limit_for_ann_queries` setting can be changed if you know you can provide enough memory. The default value is `1000000`.
|
||||||
|
:::
|
||||||
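As a sketch of the two notes above (one filtering form per query, and an explicit `LIMIT`), such a query might look like the following; this assumes `max_limit_for_ann_queries` can be adjusted per query via the `SETTINGS` clause, and the table and column names are placeholders:

```sql
-- ORDER BY form only (no distance condition in WHERE in the same query),
-- with an explicit LIMIT so the index knows how many neighbors to return
SELECT *
FROM table_name
ORDER BY L2Distance(data, [0.1, 0.1, 0.2])
LIMIT 100
SETTINGS max_limit_for_ann_queries = 1000000
```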
|
|
||||||
Both types of queries are handled the same way. The indexes get `n` neighbors (where `n` is taken from the `LIMIT` clause) and work with them. In an `ORDER BY` query they remember the numbers of all parts of the granule that contain at least one of the neighbors. In a `WHERE` query they remember only those parts that satisfy the requirements.
|
Both types of queries are handled the same way. The indexes get `n` neighbors (where `n` is taken from the `LIMIT` clause) and work with them. In an `ORDER BY` query they remember the numbers of all parts of the granule that contain at least one of the neighbors. In a `WHERE` query they remember only those parts that satisfy the requirements.
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
## Create table with ANNIndex
|
## Create table with ANNIndex
|
||||||
|
|
||||||
This feature is disabled by default. To enable it, set `allow_experimental_annoy_index` to 1. Also, this feature is disabled for arm, due to likely problems with the algorithm.
|
This feature is disabled by default. To enable it, set `allow_experimental_annoy_index` to 1. Also, this feature is disabled on ARM, due to likely problems with the algorithm.
|
||||||
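For example, the setting can be enabled for the session before creating the table (a minimal sketch):

```sql
SET allow_experimental_annoy_index = 1;
```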
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE t
|
CREATE TABLE t
|
||||||
(
|
(
|
||||||
`id` Int64,
|
`id` Int64,
|
||||||
`number` Tuple(Float32, Float32, Float32),
|
`data` Tuple(Float32, Float32, Float32),
|
||||||
INDEX x number TYPE annoy GRANULARITY N
|
INDEX ann_index_name data TYPE ann_index_type(ann_index_parameters) GRANULARITY N
|
||||||
)
|
)
|
||||||
ENGINE = MergeTree
|
ENGINE = MergeTree
|
||||||
ORDER BY id;
|
ORDER BY id;
|
||||||
@ -63,8 +71,8 @@ ORDER BY id;
|
|||||||
CREATE TABLE t
|
CREATE TABLE t
|
||||||
(
|
(
|
||||||
`id` Int64,
|
`id` Int64,
|
||||||
`number` Array(Float32),
|
`data` Array(Float32),
|
||||||
INDEX x number TYPE annoy GRANULARITY N
|
INDEX ann_index_name data TYPE ann_index_type(ann_index_parameters) GRANULARITY N
|
||||||
)
|
)
|
||||||
ENGINE = MergeTree
|
ENGINE = MergeTree
|
||||||
ORDER BY id;
|
ORDER BY id;
|
||||||
@ -73,7 +81,7 @@ ORDER BY id;
|
|||||||
With a greater `GRANULARITY`, indexes remember the data structure better. The `GRANULARITY` indicates how many granules will be used to construct the index. The more data is provided to the index, the more of it can be handled by one index, and the higher the chance that with the right hyperparameters the index will remember the data structure better. But some indexes can't be built if they don't have enough data, so such a granule will always participate in the query. For more information, see the description of the indexes.
|
With a greater `GRANULARITY`, indexes remember the data structure better. The `GRANULARITY` indicates how many granules will be used to construct the index. The more data is provided to the index, the more of it can be handled by one index, and the higher the chance that with the right hyperparameters the index will remember the data structure better. But some indexes can't be built if they don't have enough data, so such a granule will always participate in the query. For more information, see the description of the indexes.
|
||||||
|
|
||||||
As the indexes are built only during insertions into the table, `INSERT` and `OPTIMIZE` queries are slower than for an ordinary table. At this stage the indexes remember all the information about the given data. ANNIndexes should be used if you have immutable or rarely changing data and many read requests.
|
As the indexes are built only during insertions into the table, `INSERT` and `OPTIMIZE` queries are slower than for an ordinary table. At this stage the indexes remember all the information about the given data. ANNIndexes should be used if you have immutable or rarely changing data and many read requests.
|
||||||
|
|
||||||
You can create your table with an index which uses a certain algorithm. Currently, only indexes based on the following algorithms are supported:
|
You can create your table with an index which uses a certain algorithm. Currently, only indexes based on the following algorithms are supported:
|
||||||
|
|
||||||
# Index list
|
# Index list
|
||||||
@ -91,8 +99,8 @@ __Examples__:
|
|||||||
CREATE TABLE t
|
CREATE TABLE t
|
||||||
(
|
(
|
||||||
id Int64,
|
id Int64,
|
||||||
number Tuple(Float32, Float32, Float32),
|
data Tuple(Float32, Float32, Float32),
|
||||||
INDEX x number TYPE annoy(T) GRANULARITY N
|
INDEX ann_index_name data TYPE annoy(NumTrees, DistanceName) GRANULARITY N
|
||||||
)
|
)
|
||||||
ENGINE = MergeTree
|
ENGINE = MergeTree
|
||||||
ORDER BY id;
|
ORDER BY id;
|
||||||
@ -102,18 +110,30 @@ ORDER BY id;
|
|||||||
CREATE TABLE t
|
CREATE TABLE t
|
||||||
(
|
(
|
||||||
id Int64,
|
id Int64,
|
||||||
number Array(Float32),
|
data Array(Float32),
|
||||||
INDEX x number TYPE annoy(T) GRANULARITY N
|
INDEX ann_index_name data TYPE annoy(NumTrees, DistanceName) GRANULARITY N
|
||||||
)
|
)
|
||||||
ENGINE = MergeTree
|
ENGINE = MergeTree
|
||||||
ORDER BY id;
|
ORDER BY id;
|
||||||
```
|
```
|
||||||
!!! note "Note"
|
|
||||||
Table with array field will work faster, but all arrays **must** have same length. Use [CONSTRAINT](../../../sql-reference/statements/create/table.md#constraints) to avoid errors. For example, `CONSTRAINT constraint_name_1 CHECK length(number) = 256`.
|
|
||||||
|
|
||||||
Parameter `T` is the number of trees which algorithm will create. The bigger it is, the slower (approximately linear) it works (in both `CREATE` and `SELECT` requests), but the better accuracy you get (adjusted for randomness).
|
:::note
|
||||||
|
Table with array field will work faster, but all arrays **must** have same length. Use [CONSTRAINT](../../../sql-reference/statements/create/table.md#constraints) to avoid errors. For example, `CONSTRAINT constraint_name_1 CHECK length(data) = 256`.
|
||||||
|
:::
|
||||||
|
|
||||||
Annoy supports only `L2Distance`.
|
Parameter `NumTrees` is the number of trees which the algorithm will create. The bigger it is, the slower it works (approximately linearly, in both `CREATE` and `SELECT` requests), but the better the accuracy you get (adjusted for randomness). By default it is set to `100`. Parameter `DistanceName` is the name of the distance function. By default it is set to `L2Distance`. It can be set without changing the first parameter, for example:
|
||||||
|
```sql
|
||||||
|
CREATE TABLE t
|
||||||
|
(
|
||||||
|
id Int64,
|
||||||
|
data Array(Float32),
|
||||||
|
INDEX ann_index_name data TYPE annoy('cosineDistance') GRANULARITY N
|
||||||
|
)
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY id;
|
||||||
|
```
|
||||||
|
|
||||||
|
Annoy supports `L2Distance` and `cosineDistance`.
|
||||||
|
|
||||||
In a `SELECT` query you can specify the size of the internal buffer in the settings (`ann_index_select_query_params`); more details are in the description above or in the [original repository](https://github.com/spotify/annoy). During the query it will inspect up to `search_k` nodes, which defaults to `n_trees * n` if not provided. `search_k` gives you a run-time tradeoff between better accuracy and speed.
|
In a `SELECT` query you can specify the size of the internal buffer in the settings (`ann_index_select_query_params`); more details are in the description above or in the [original repository](https://github.com/spotify/annoy). During the query it will inspect up to `search_k` nodes, which defaults to `n_trees * n` if not provided. `search_k` gives you a run-time tradeoff between better accuracy and speed.
|
||||||
|
|
||||||
|
@ -85,7 +85,7 @@ Example of setting the addresses of the auxiliary ZooKeeper cluster:
|
|||||||
</auxiliary_zookeepers>
|
</auxiliary_zookeepers>
|
||||||
```
|
```
|
||||||
|
|
||||||
To store table datameta in a auxiliary ZooKeeper cluster instead of default ZooKeeper cluster, we can use the SQL to create table with
|
To store table metadata in an auxiliary ZooKeeper cluster instead of the default ZooKeeper cluster, we can use SQL to create the table with the
|
||||||
ReplicatedMergeTree engine as follows:
|
ReplicatedMergeTree engine as follows:
|
||||||
|
|
||||||
```
|
```
|
||||||
|
@ -1456,6 +1456,10 @@ If setting [input_format_with_types_use_header](../operations/settings/settings.
|
|||||||
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
|
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
|
## RowBinary format settings {#row-binary-format-settings}
|
||||||
|
|
||||||
|
- [format_binary_max_string_size](../operations/settings/settings.md#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.
|
||||||
|
|
||||||
## Values {#data-format-values}
|
## Values {#data-format-values}
|
||||||
|
|
||||||
Prints every row in brackets. Rows are separated by commas. There is no comma after the last row. The values inside the brackets are also comma-separated. Numbers are output in a decimal format without quotes. Arrays are output in square brackets. Strings, dates, and dates with times are output in quotes. Escaping rules and parsing are similar to the [TabSeparated](#tabseparated) format. During formatting, extra spaces aren’t inserted, but during parsing, they are allowed and skipped (except for spaces inside array values, which are not allowed). [NULL](../sql-reference/syntax.md) is represented as `NULL`.
|
Prints every row in brackets. Rows are separated by commas. There is no comma after the last row. The values inside the brackets are also comma-separated. Numbers are output in a decimal format without quotes. Arrays are output in square brackets. Strings, dates, and dates with times are output in quotes. Escaping rules and parsing are similar to the [TabSeparated](#tabseparated) format. During formatting, extra spaces aren’t inserted, but during parsing, they are allowed and skipped (except for spaces inside array values, which are not allowed). [NULL](../sql-reference/syntax.md) is represented as `NULL`.
|
||||||
|
@ -11,6 +11,7 @@ Main cache types:
|
|||||||
|
|
||||||
- `mark_cache` — Cache of marks used by table engines of the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) family.
|
- `mark_cache` — Cache of marks used by table engines of the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) family.
|
||||||
- `uncompressed_cache` — Cache of uncompressed data used by table engines of the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) family.
|
- `uncompressed_cache` — Cache of uncompressed data used by table engines of the [MergeTree](../engines/table-engines/mergetree-family/mergetree.md) family.
|
||||||
|
- Operating system page cache (used indirectly, for files with actual data).
|
||||||
|
|
||||||
Additional cache types:
|
Additional cache types:
|
||||||
|
|
||||||
@ -22,10 +23,4 @@ Additional cache types:
|
|||||||
- Schema inference cache.
|
- Schema inference cache.
|
||||||
- [Filesystem cache](storing-data.md) over S3, Azure, Local and other disks.
|
- [Filesystem cache](storing-data.md) over S3, Azure, Local and other disks.
|
||||||
|
|
||||||
Indirectly used:
|
To drop one of the caches, use [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md#drop-mark-cache) statements.
|
||||||
|
|
||||||
- OS page cache.
|
|
||||||
|
|
||||||
To drop cache, use [SYSTEM DROP ... CACHE](../sql-reference/statements/system.md) statements.
|
|
||||||
|
|
||||||
[Original article](https://clickhouse.com/docs/en/operations/caches/) <!--hide-->
|
|
||||||
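For instance, the `SYSTEM DROP ... CACHE` statements referenced in this hunk can be invoked directly; a minimal sketch for the two caches named above:

```sql
SYSTEM DROP MARK CACHE;
SYSTEM DROP UNCOMPRESSED CACHE;
```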
|
@ -668,7 +668,7 @@ log_query_views=1
|
|||||||
|
|
||||||
## log_formatted_queries {#settings-log-formatted-queries}
|
## log_formatted_queries {#settings-log-formatted-queries}
|
||||||
|
|
||||||
Allows to log formatted queries to the [system.query_log](../../operations/system-tables/query_log.md) system table (populates `formatted_query` column in the [system.query_log](../../operations/system-tables/query_log.md)).
|
Allows to log formatted queries to the [system.query_log](../../operations/system-tables/query_log.md) system table (populates `formatted_query` column in the [system.query_log](../../operations/system-tables/query_log.md)).
|
||||||
|
|
||||||
Possible values:
|
Possible values:
|
||||||
|
|
||||||
@ -1807,6 +1807,41 @@ See also:
|
|||||||
|
|
||||||
- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log)
|
- System table [trace_log](../../operations/system-tables/trace_log.md/#system_tables-trace_log)
|
||||||
|
|
||||||
|
## memory_profiler_step {#memory_profiler_step}
|
||||||
|
|
||||||
|
Sets the step of the memory profiler. Whenever query memory usage exceeds the next multiple of this step (in bytes), the memory profiler collects the allocating stacktrace and writes it into [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log).
|
||||||
|
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
- A positive integer number of bytes.
|
||||||
|
|
||||||
|
- 0 for turning off the memory profiler.
|
||||||
|
|
||||||
|
Default value: 4,194,304 bytes (4 MiB).
|
||||||
|
|
||||||
|
## memory_profiler_sample_probability {#memory_profiler_sample_probability}
|
||||||
|
|
||||||
|
Sets the probability of collecting stacktraces at random allocations and deallocations and writing them into [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log).
|
||||||
|
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
- A positive floating-point number in the range [0..1].
|
||||||
|
|
||||||
|
- 0.0 for turning off the memory sampling.
|
||||||
|
|
||||||
|
Default value: 0.0.
|
||||||
|
|
||||||
|
## trace_profile_events {#trace_profile_events}
|
||||||
|
|
||||||
|
Enables or disables collecting stacktraces on each update of profile events, along with the name of the profile event and the value of the increment, and sending them into [trace_log](../../operations/system-tables/trace_log.md#system_tables-trace_log).
|
||||||
|
|
||||||
|
Possible values:
|
||||||
|
|
||||||
|
- 1 — Tracing of profile events enabled.
|
||||||
|
- 0 — Tracing of profile events disabled.
|
||||||
|
|
||||||
|
Default value: 0.
|
||||||
|
|
||||||
## allow_introspection_functions {#settings-allow_introspection_functions}
|
## allow_introspection_functions {#settings-allow_introspection_functions}
|
||||||
|
|
||||||
Enables or disables [introspections functions](../../sql-reference/functions/introspection.md) for query profiling.
|
Enables or disables [introspections functions](../../sql-reference/functions/introspection.md) for query profiling.
|
||||||
@ -4829,3 +4864,11 @@ Disabled by default.
|
|||||||
Allow skipping columns with unsupported types while schema inference for format BSONEachRow.
|
Allow skipping columns with unsupported types while schema inference for format BSONEachRow.
|
||||||
|
|
||||||
Disabled by default.
|
Disabled by default.
|
||||||
|
|
||||||
|
## RowBinary format settings {#row-binary-format-settings}
|
||||||
|
|
||||||
|
### format_binary_max_string_size {#format_binary_max_string_size}
|
||||||
|
|
||||||
|
The maximum allowed size for String in RowBinary format. It prevents allocating a large amount of memory in case of corrupted data. 0 means there is no limit.
|
||||||
|
|
||||||
|
Default value: `1GiB`
|
||||||
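A sketch of overriding this limit for a single import, assuming the data is read with the `file()` table function; the file name and structure are placeholders:

```sql
SELECT count()
FROM file('data.bin', 'RowBinary', 'id UInt64, s String')
SETTINGS format_binary_max_string_size = 104857600;  -- allow strings up to 100 MiB
```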
|
@ -5,7 +5,8 @@ slug: /en/operations/system-tables/trace_log
|
|||||||
|
|
||||||
Contains stack traces collected by the sampling query profiler.
|
Contains stack traces collected by the sampling query profiler.
|
||||||
|
|
||||||
ClickHouse creates this table when the [trace_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. Also the [query_profiler_real_time_period_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns) and [query_profiler_cpu_time_period_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns) settings should be set.
|
ClickHouse creates this table when the [trace_log](../../operations/server-configuration-parameters/settings.md#server_configuration_parameters-trace_log) server configuration section is set. Also see settings: [query_profiler_real_time_period_ns](../../operations/settings/settings.md#query_profiler_real_time_period_ns), [query_profiler_cpu_time_period_ns](../../operations/settings/settings.md#query_profiler_cpu_time_period_ns), [memory_profiler_step](../../operations/settings/settings.md#memory_profiler_step),
|
||||||
|
[memory_profiler_sample_probability](../../operations/settings/settings.md#memory_profiler_sample_probability), [trace_profile_events](../../operations/settings/settings.md#trace_profile_events).
|
||||||
|
|
||||||
To analyze logs, use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` introspection functions.
|
To analyze logs, use the `addressToLine`, `addressToLineWithInlines`, `addressToSymbol` and `demangle` introspection functions.
|
||||||
|
|
||||||
@ -29,6 +30,8 @@ Columns:
|
|||||||
- `CPU` represents collecting stack traces by CPU time.
|
- `CPU` represents collecting stack traces by CPU time.
|
||||||
- `Memory` represents collecting allocations and deallocations when memory allocation exceeds the subsequent watermark.
|
- `Memory` represents collecting allocations and deallocations when memory allocation exceeds the subsequent watermark.
|
||||||
- `MemorySample` represents collecting random allocations and deallocations.
|
- `MemorySample` represents collecting random allocations and deallocations.
|
||||||
|
- `MemoryPeak` represents collecting updates of peak memory usage.
|
||||||
|
- `ProfileEvent` represents collecting of increments of profile events.
|
||||||
|
|
||||||
- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Thread identifier.
|
- `thread_number` ([UInt32](../../sql-reference/data-types/int-uint.md)) — Thread identifier.
|
||||||
|
|
||||||
@ -36,6 +39,12 @@ Columns:
|
|||||||
|
|
||||||
- `trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process.
|
- `trace` ([Array(UInt64)](../../sql-reference/data-types/array.md)) — Stack trace at the moment of sampling. Each element is a virtual memory address inside ClickHouse server process.
|
||||||
|
|
||||||
|
- `size` ([Int64](../../sql-reference/data-types/int-uint.md)) - For trace types `Memory`, `MemorySample` or `MemoryPeak` it is the amount of memory allocated; for other trace types it is 0.
|
||||||
|
|
||||||
|
- `event` ([LowCardinality(String)](../../sql-reference/data-types/lowcardinality.md)) - For trace type `ProfileEvent` it is the name of the updated profile event; for other trace types it is an empty string.
|
||||||
|
|
||||||
|
- `increment` ([UInt64](../../sql-reference/data-types/int-uint.md)) - For trace type `ProfileEvent` it is the amount of increment of the profile event; for other trace types it is 0.
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
|
@ -32,8 +32,8 @@ The null hypothesis is that means of populations are equal. Normal distribution
|
|||||||
|
|
||||||
- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
|
- calculated t-statistic. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).
|
- calculated p-value. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
- [calculated confidence-interval-low.] [Float64](../../../sql-reference/data-types/float.md).
|
- calculated confidence-interval-low. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
- [calculated confidence-interval-high.] [Float64](../../../sql-reference/data-types/float.md).
|
- calculated confidence-interval-high. [Float64](../../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
@ -161,3 +161,140 @@ Result:
|
|||||||
│ -1 │
|
│ -1 │
|
||||||
└─────────────┘
|
└─────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## multiplyDecimal(a, b[, result_scale])
|
||||||
|
|
||||||
|
Performs multiplication on two decimals. Result value will be of type [Decimal256](../../sql-reference/data-types/decimal.md).
|
||||||
|
Result scale can be explicitly specified by `result_scale` argument (const Integer in range `[0, 76]`). If not specified, the result scale is the max scale of given arguments.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
These functions work significantly slower than usual `multiply`.
|
||||||
|
In case you don't really need controlled precision and/or need fast computation, consider using [multiply](#multiply)
|
||||||
|
:::
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
multiplyDecimal(a, b[, result_scale])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `a` — First value: [Decimal](../../sql-reference/data-types/decimal.md).
|
||||||
|
- `b` — Second value: [Decimal](../../sql-reference/data-types/decimal.md).
|
||||||
|
- `result_scale` — Scale of result: [Int/UInt](../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- The result of multiplication with given scale.
|
||||||
|
|
||||||
|
Type: [Decimal256](../../sql-reference/data-types/decimal.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─multiplyDecimal(toDecimal256(-12, 0), toDecimal32(-2.1, 1), 1)─┐
|
||||||
|
│ 25.2 │
|
||||||
|
└────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
**Difference from regular multiplication:**
|
||||||
|
```sql
|
||||||
|
SELECT toDecimal64(-12.647, 3) * toDecimal32(2.1239, 4);
|
||||||
|
SELECT toDecimal64(-12.647, 3) as a, toDecimal32(2.1239, 4) as b, multiplyDecimal(a, b);
|
||||||
|
```
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─multiply(toDecimal64(-12.647, 3), toDecimal32(2.1239, 4))─┐
|
||||||
|
│ -26.8609633 │
|
||||||
|
└───────────────────────────────────────────────────────────┘
|
||||||
|
┌─multiplyDecimal(toDecimal64(-12.647, 3), toDecimal32(2.1239, 4))─┐
|
||||||
|
│ -26.8609 │
|
||||||
|
└──────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
toDecimal64(-12.647987876, 9) AS a,
|
||||||
|
toDecimal64(123.967645643, 9) AS b,
|
||||||
|
multiplyDecimal(a, b);
|
||||||
|
|
||||||
|
SELECT
|
||||||
|
toDecimal64(-12.647987876, 9) AS a,
|
||||||
|
toDecimal64(123.967645643, 9) AS b,
|
||||||
|
a * b;
|
||||||
|
```
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─────────────a─┬─────────────b─┬─multiplyDecimal(toDecimal64(-12.647987876, 9), toDecimal64(123.967645643, 9))─┐
|
||||||
|
│ -12.647987876 │ 123.967645643 │ -1567.941279108 │
|
||||||
|
└───────────────┴───────────────┴───────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
|
||||||
|
Received exception from server (version 22.11.1):
|
||||||
|
Code: 407. DB::Exception: Received from localhost:9000. DB::Exception: Decimal math overflow: While processing toDecimal64(-12.647987876, 9) AS a, toDecimal64(123.967645643, 9) AS b, a * b. (DECIMAL_OVERFLOW)
|
||||||
|
```
|
||||||
|
|
||||||
|
## divideDecimal(a, b[, result_scale])
|
||||||
|
|
||||||
|
Performs division on two decimals. Result value will be of type [Decimal256](../../sql-reference/data-types/decimal.md).
|
||||||
|
Result scale can be explicitly specified by `result_scale` argument (const Integer in range `[0, 76]`). If not specified, the result scale is the max scale of given arguments.
|
||||||
|
|
||||||
|
:::note
|
||||||
|
These functions work significantly slower than the usual `divide`.
|
||||||
|
In case you don't really need controlled precision and/or need fast computation, consider using [divide](#divide).
|
||||||
|
:::
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
```sql
|
||||||
|
divideDecimal(a, b[, result_scale])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `a` — First value: [Decimal](../../sql-reference/data-types/decimal.md).
|
||||||
|
- `b` — Second value: [Decimal](../../sql-reference/data-types/decimal.md).
|
||||||
|
- `result_scale` — Scale of result: [Int/UInt](../../sql-reference/data-types/int-uint.md).
|
||||||
|
|
||||||
|
**Returned value**
|
||||||
|
|
||||||
|
- The result of division with given scale.
|
||||||
|
|
||||||
|
Type: [Decimal256](../../sql-reference/data-types/decimal.md).
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─divideDecimal(toDecimal256(-12, 0), toDecimal32(2.1, 1), 10)─┐
|
||||||
|
│ -5.7142857142 │
|
||||||
|
└──────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
**Difference from regular division:**
|
||||||
|
```sql
|
||||||
|
SELECT toDecimal64(-12, 1) / toDecimal32(2.1, 1);
|
||||||
|
SELECT toDecimal64(-12, 1) as a, toDecimal32(2.1, 1) as b, divideDecimal(a, b, 1), divideDecimal(a, b, 5);
|
||||||
|
```
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─divide(toDecimal64(-12, 1), toDecimal32(2.1, 1))─┐
|
||||||
|
│ -5.7 │
|
||||||
|
└──────────────────────────────────────────────────┘
|
||||||
|
|
||||||
|
┌───a─┬───b─┬─divideDecimal(toDecimal64(-12, 1), toDecimal32(2.1, 1), 1)─┬─divideDecimal(toDecimal64(-12, 1), toDecimal32(2.1, 1), 5)─┐
|
||||||
|
│ -12 │ 2.1 │ -5.7 │ -5.71428 │
|
||||||
|
└─────┴─────┴────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT toDecimal64(-12, 0) / toDecimal32(2.1, 1);
|
||||||
|
SELECT toDecimal64(-12, 0) as a, toDecimal32(2.1, 1) as b, divideDecimal(a, b, 1), divideDecimal(a, b, 5);
|
||||||
|
```
|
||||||
|
|
||||||
|
```text
|
||||||
|
DB::Exception: Decimal result's scale is less than argument's one: While processing toDecimal64(-12, 0) / toDecimal32(2.1, 1). (ARGUMENT_OUT_OF_BOUND)
|
||||||
|
|
||||||
|
┌───a─┬───b─┬─divideDecimal(toDecimal64(-12, 0), toDecimal32(2.1, 1), 1)─┬─divideDecimal(toDecimal64(-12, 0), toDecimal32(2.1, 1), 5)─┐
|
||||||
|
│ -12 │ 2.1 │ -5.7 │ -5.71428 │
|
||||||
|
└─────┴─────┴────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
@ -474,13 +474,13 @@ Calculates the cosine distance between two vectors (the values of the tuples are
|
|||||||
**Syntax**
|
**Syntax**
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
cosineDistance(tuple1, tuple2)
|
cosineDistance(vector1, vector2)
|
||||||
```
|
```
|
||||||
|
|
||||||
**Arguments**
|
**Arguments**
|
||||||
|
|
||||||
- `tuple1` — First tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
- `vector1` — First vector. [Tuple](../../sql-reference/data-types/tuple.md) or [Array](../../sql-reference/data-types/array.md).
|
||||||
- `tuple2` — Second tuple. [Tuple](../../sql-reference/data-types/tuple.md).
|
- `vector2` — Second vector. [Tuple](../../sql-reference/data-types/tuple.md) or [Array](../../sql-reference/data-types/array.md).
|
||||||
|
|
||||||
**Returned value**
|
**Returned value**
|
||||||
|
|
||||||
@ -488,7 +488,7 @@ cosineDistance(tuple1, tuple2)
|
|||||||
|
|
||||||
Type: [Float](../../sql-reference/data-types/float.md).
|
Type: [Float](../../sql-reference/data-types/float.md).
|
||||||
|
|
||||||
**Example**
|
**Examples**
|
||||||
|
|
||||||
Query:
|
Query:
|
||||||
|
|
||||||
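Since the hunk above extends `cosineDistance` to accept arrays as well as tuples, here is a minimal illustration (not taken from the page itself):

```sql
SELECT cosineDistance([1, 2, 3], [3, 5, 7]) AS dist;
```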
|
@ -151,7 +151,7 @@ Perform the query:
|
|||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT
|
SELECT
|
||||||
dictGet('ext-dict-mult', ('c1','c2'), number) AS val,
|
dictGet('ext-dict-mult', ('c1','c2'), number + 1) AS val,
|
||||||
toTypeName(val) AS type
|
toTypeName(val) AS type
|
||||||
FROM system.numbers
|
FROM system.numbers
|
||||||
LIMIT 3;
|
LIMIT 3;
|
||||||
|
@ -1865,6 +1865,17 @@ Next, specify the path to `libcatboostmodel.<so|dylib>` in the clickhouse config
|
|||||||
</clickhouse>
|
</clickhouse>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
For security and isolation reasons, the model evaluation does not run in the server process but in the clickhouse-library-bridge process.
|
||||||
|
At the first execution of `catboostEvaluate()`, the server starts the library bridge process if it is not running already. Both processes
|
||||||
|
communicate using an HTTP interface. By default, port `9012` is used. A different port can be specified as follows - this is useful if port
|
||||||
|
`9012` is already assigned to a different service.
|
||||||
|
|
||||||
|
``` xml
|
||||||
|
<library_bridge>
|
||||||
|
<port>9019</port>
|
||||||
|
</library_bridge>
|
||||||
|
```
|
||||||
|
|
||||||
2. Train a catboost model using libcatboost
|
2. Train a catboost model using libcatboost
|
||||||
|
|
||||||
See [Training and applying models](https://catboost.ai/docs/features/training.html#training) for how to train catboost models from a training data set.
|
See [Training and applying models](https://catboost.ai/docs/features/training.html#training) for how to train catboost models from a training data set.
|
||||||
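For context, once a model has been trained and registered it is evaluated from SQL with the `catboostEvaluate` function mentioned above; a hedged sketch, where the model path, table and feature columns are placeholders:

```sql
SELECT catboostEvaluate('/path/to/model.bin', feature_1, feature_2) AS prediction
FROM my_dataset;
```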
|
@ -11,6 +11,14 @@ Projections store data in a format that optimizes query execution, this feature
|
|||||||
|
|
||||||
You can define one or more projections for a table, and during the query analysis the projection with the least data to scan will be selected by ClickHouse without modifying the query provided by the user.
|
You can define one or more projections for a table, and during the query analysis the projection with the least data to scan will be selected by ClickHouse without modifying the query provided by the user.
|
||||||
|
|
||||||
|
:::note Disk usage
|
||||||
|
|
||||||
|
Projections internally create a new hidden table, which means that more IO and disk space will be required.
|
||||||
|
For example, if the projection defines a different primary key, all the data from the original table will be duplicated.
|
||||||
|
:::
|
||||||
|
|
||||||
|
You can see more technical details about how projections work internally on this [page](/docs/en/guides/improving-query-performance/sparse-primary-indexes/sparse-primary-indexes-multiple.md/#option-3-projections).
|
||||||
|
|
||||||
## Example filtering without using primary keys
|
## Example filtering without using primary keys
|
||||||
|
|
||||||
Creating the table:
|
Creating the table:
|
||||||
|
@ -59,6 +59,28 @@ If the table already exists and `IF NOT EXISTS` is specified, the query won’t
|
|||||||
|
|
||||||
There can be other clauses after the `ENGINE` clause in the query. See detailed documentation on how to create tables in the descriptions of [table engines](../../../engines/table-engines/index.md#table_engines).
|
There can be other clauses after the `ENGINE` clause in the query. See detailed documentation on how to create tables in the descriptions of [table engines](../../../engines/table-engines/index.md#table_engines).
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
In ClickHouse Cloud please split this into two steps:
|
||||||
|
1. Create the table structure
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE t1
|
||||||
|
ENGINE = MergeTree
|
||||||
|
ORDER BY ...
|
||||||
|
# highlight-next-line
|
||||||
|
EMPTY AS
|
||||||
|
SELECT ...
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Populate the table
|
||||||
|
|
||||||
|
```sql
|
||||||
|
INSERT INTO t1
|
||||||
|
SELECT ...
|
||||||
|
```
|
||||||
|
|
||||||
|
:::
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
Query:
|
Query:
|
||||||
@ -159,7 +181,7 @@ ENGINE = engine
|
|||||||
PRIMARY KEY(expr1[, expr2,...]);
|
PRIMARY KEY(expr1[, expr2,...]);
|
||||||
```
|
```
|
||||||
|
|
||||||
:::warning
|
:::warning
|
||||||
You can't combine both ways in one query.
|
You can't combine both ways in one query.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
@ -215,7 +237,7 @@ ALTER TABLE codec_example MODIFY COLUMN float_value CODEC(Default);
|
|||||||
|
|
||||||
Codecs can be combined in a pipeline, for example, `CODEC(Delta, Default)`.
|
Codecs can be combined in a pipeline, for example, `CODEC(Delta, Default)`.
|
||||||
|
|
||||||
:::warning
|
:::warning
|
||||||
You can’t decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility.
|
You can’t decompress ClickHouse database files with external utilities like `lz4`. Instead, use the special [clickhouse-compressor](https://github.com/ClickHouse/ClickHouse/tree/master/programs/compressor) utility.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
@ -301,44 +323,44 @@ Encryption codecs:
|
|||||||
|
|
||||||
#### AES_128_GCM_SIV
|
#### AES_128_GCM_SIV
|
||||||
|
|
||||||
`CODEC('AES-128-GCM-SIV')` — Encrypts data with AES-128 in [RFC 8452](https://tools.ietf.org/html/rfc8452) GCM-SIV mode.
|
`CODEC('AES-128-GCM-SIV')` — Encrypts data with AES-128 in [RFC 8452](https://tools.ietf.org/html/rfc8452) GCM-SIV mode.
|
||||||
|
|
||||||
|
|
||||||
#### AES-256-GCM-SIV
|
#### AES-256-GCM-SIV
|
||||||
|
|
||||||
`CODEC('AES-256-GCM-SIV')` — Encrypts data with AES-256 in GCM-SIV mode.
|
`CODEC('AES-256-GCM-SIV')` — Encrypts data with AES-256 in GCM-SIV mode.
|
||||||
|
|
||||||
These codecs use a fixed nonce and encryption is therefore deterministic. This makes it compatible with deduplicating engines such as [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) but has a weakness: when the same data block is encrypted twice, the resulting ciphertext will be exactly the same so an adversary who can read the disk can see this equivalence (although only the equivalence, without getting its content).
|
These codecs use a fixed nonce and encryption is therefore deterministic. This makes it compatible with deduplicating engines such as [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) but has a weakness: when the same data block is encrypted twice, the resulting ciphertext will be exactly the same so an adversary who can read the disk can see this equivalence (although only the equivalence, without getting its content).
|
||||||
|
|
||||||
:::warning
|
:::warning
|
||||||
Most engines including the "\*MergeTree" family create index files on disk without applying codecs. This means plaintext will appear on disk if an encrypted column is indexed.
|
Most engines including the "\*MergeTree" family create index files on disk without applying codecs. This means plaintext will appear on disk if an encrypted column is indexed.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
:::warning
|
:::warning
|
||||||
If you perform a SELECT query mentioning a specific value in an encrypted column (such as in its WHERE clause), the value may appear in [system.query_log](../../../operations/system-tables/query_log.md). You may want to disable the logging.
|
If you perform a SELECT query mentioning a specific value in an encrypted column (such as in its WHERE clause), the value may appear in [system.query_log](../../../operations/system-tables/query_log.md). You may want to disable the logging.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE mytable
|
CREATE TABLE mytable
|
||||||
(
|
(
|
||||||
x String Codec(AES_128_GCM_SIV)
|
x String Codec(AES_128_GCM_SIV)
|
||||||
)
|
)
|
||||||
ENGINE = MergeTree ORDER BY x;
|
ENGINE = MergeTree ORDER BY x;
|
||||||
```
|
```
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
If compression needs to be applied, it must be explicitly specified. Otherwise, only encryption will be applied to data.
|
If compression needs to be applied, it must be explicitly specified. Otherwise, only encryption will be applied to data.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE mytable
|
CREATE TABLE mytable
|
||||||
(
|
(
|
||||||
x String Codec(Delta, LZ4, AES_128_GCM_SIV)
|
x String Codec(Delta, LZ4, AES_128_GCM_SIV)
|
||||||
)
|
)
|
||||||
ENGINE = MergeTree ORDER BY x;
|
ENGINE = MergeTree ORDER BY x;
|
||||||
```
|
```
|
||||||
|
|
||||||
@ -372,7 +394,7 @@ It’s possible to use tables with [ENGINE = Memory](../../../engines/table-engi
|
|||||||
|
|
||||||
'REPLACE' query allows you to update the table atomically.
|
'REPLACE' query allows you to update the table atomically.
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
This query is supported only for [Atomic](../../../engines/database-engines/atomic.md) database engine.
|
This query is supported only for [Atomic](../../../engines/database-engines/atomic.md) database engine.
|
||||||
:::
|
:::
|
||||||
|
|
||||||
@ -388,7 +410,7 @@ RENAME TABLE myNewTable TO myOldTable;
|
|||||||
Instead of above, you can use the following:
|
Instead of above, you can use the following:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
REPLACE TABLE myOldTable SELECT * FROM myOldTable WHERE CounterID <12345;
|
REPLACE TABLE myOldTable ENGINE = MergeTree() ORDER BY CounterID AS SELECT * FROM myOldTable WHERE CounterID <12345;
|
||||||
```
|
```
|
||||||
|
|
||||||
### Syntax
|
### Syntax
|
||||||
@ -448,7 +470,7 @@ SELECT * FROM base.t1;
|
|||||||
|
|
||||||
You can add a comment to the table when creating it.
|
You can add a comment to the table when creating it.
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
The comment is supported for all table engines except [Kafka](../../../engines/table-engines/integrations/kafka.md), [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) and [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md).
|
The comment is supported for all table engines except [Kafka](../../../engines/table-engines/integrations/kafka.md), [RabbitMQ](../../../engines/table-engines/integrations/rabbitmq.md) and [EmbeddedRocksDB](../../../engines/table-engines/integrations/embedded-rocksdb.md).
|
||||||
:::
|
:::
|
||||||
|
|
||||||
@ -60,7 +60,7 @@ If you specify `POPULATE`, the existing table data is inserted into the view whe

A `SELECT` query can contain `DISTINCT`, `GROUP BY`, `ORDER BY`, `LIMIT`. Note that the corresponding conversions are performed independently on each block of inserted data. For example, if `GROUP BY` is set, data is aggregated during insertion, but only within a single packet of inserted data. The data won’t be aggregated any further. The exception is when using an `ENGINE` that independently performs data aggregation, such as `SummingMergeTree`.

The execution of [ALTER](../../../sql-reference/statements/alter/view.md) queries on materialized views has limitations, so they might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.
The execution of [ALTER](/docs/en/sql-reference/statements/alter/view.md) queries on materialized views has limitations, for example, you cannot update the `SELECT` query, so this might be inconvenient. If the materialized view uses the construction `TO [db.]name`, you can `DETACH` the view, run `ALTER` for the target table, and then `ATTACH` the previously detached (`DETACH`) view.

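For illustration, here is a minimal sketch of that workaround. The names `mv` (a materialized view created with `TO dest`) and `dest`, as well as the added column, are hypothetical:

```sql
-- Stop the view from receiving inserts while the target table changes.
DETACH TABLE mv;

-- Alter the target table; the view definition itself is not touched.
ALTER TABLE dest ADD COLUMN extra UInt64 DEFAULT 0;

-- Re-attach the previously detached view.
ATTACH TABLE mv;
```
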
Note that the materialized view is influenced by the [optimize_on_insert](../../../operations/settings/settings.md#optimize-on-insert) setting. The data is merged before the insertion into a view.

@ -10,7 +10,7 @@ Shows the execution plan of a statement.

Syntax:

```sql
EXPLAIN [AST | SYNTAX | PLAN | PIPELINE | ESTIMATE | TABLE OVERRIDE] [setting = value, ...]
EXPLAIN [AST | SYNTAX | QUERY TREE | PLAN | PIPELINE | ESTIMATE | TABLE OVERRIDE] [setting = value, ...]
[
    SELECT ... |
    tableFunction(...) [COLUMNS (...)] [ORDER BY ...] [PARTITION BY ...] [PRIMARY KEY] [SAMPLE BY ...] [TTL ...]
@ -98,7 +98,7 @@ ClickHouse предоставляет возможность аутентифи

:::danger "Important"
If a user is configured for Kerberos authentication, no other authentication methods are available to that user. If any other authentication method is specified alongside `kerberos` in the user definition, ClickHouse will shut down.

:::info ""
Note once again that, besides `users.xml`, Kerberos must also be enabled in `config.xml`.
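
For readers who manage users through SQL rather than `users.xml`, a minimal sketch of the equivalent setup might look as follows. It assumes SQL-driven access control is enabled; the user name, realm and database are placeholders:

```sql
-- Kerberos must still be enabled in config.xml for this to work.
CREATE USER kerberos_user IDENTIFIED WITH kerberos REALM 'EXAMPLE.COM';

-- No other authentication method may be combined with `kerberos` for the same user.
GRANT SELECT ON some_db.* TO kerberos_user;
```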
@ -159,3 +159,150 @@ SELECT min2(-1, 2);
└─────────────┘
```

## multiplyDecimal(a, b[, result_scale])

Multiplies two Decimal values. The result has the [Decimal256](../../sql-reference/data-types/decimal.md) type.

The scale (the number of fractional digits) of the result can be set explicitly with the `result_scale` argument (an integer constant in the range `[0, 76]`).
If this argument is not specified, the scale of the result is the larger of the scales of the two arguments.

**Syntax**

```sql
multiplyDecimal(a, b[, result_scale])
```

:::note
This function is much slower than the regular `multiply`.
If fixed precision is not required and/or fast computation is needed, use [multiply](#multiply) instead.
:::

**Arguments**

- `a` — First multiplier/dividend: [Decimal](../../sql-reference/data-types/decimal.md).
- `b` — Second multiplier/divisor: [Decimal](../../sql-reference/data-types/decimal.md).
- `result_scale` — Scale of the result: [Int/UInt](../../sql-reference/data-types/int-uint.md).

**Returned value**

- The result of the multiplication with the given scale.

Type: [Decimal256](../../sql-reference/data-types/decimal.md).

**Examples**

```sql
SELECT multiplyDecimal(toDecimal256(-12, 0), toDecimal32(-2.1, 1), 1);
```

```text
┌─multiplyDecimal(toDecimal256(-12, 0), toDecimal32(-2.1, 1), 1)─┐
│ 25.2 │
└────────────────────────────────────────────────────────────────┘
```

**Difference from standard functions**

```sql
SELECT toDecimal64(-12.647, 3) * toDecimal32(2.1239, 4);
SELECT toDecimal64(-12.647, 3) as a, toDecimal32(2.1239, 4) as b, multiplyDecimal(a, b);
```

```text
┌─multiply(toDecimal64(-12.647, 3), toDecimal32(2.1239, 4))─┐
│ -26.8609633 │
└───────────────────────────────────────────────────────────┘
┌─multiplyDecimal(toDecimal64(-12.647, 3), toDecimal32(2.1239, 4))─┐
│ -26.8609 │
└──────────────────────────────────────────────────────────────────┘
```

```sql
SELECT
    toDecimal64(-12.647987876, 9) AS a,
    toDecimal64(123.967645643, 9) AS b,
    multiplyDecimal(a, b);

SELECT
    toDecimal64(-12.647987876, 9) AS a,
    toDecimal64(123.967645643, 9) AS b,
    a * b;
```

```text
┌─────────────a─┬─────────────b─┬─multiplyDecimal(toDecimal64(-12.647987876, 9), toDecimal64(123.967645643, 9))─┐
│ -12.647987876 │ 123.967645643 │ -1567.941279108 │
└───────────────┴───────────────┴───────────────────────────────────────────────────────────────────────────────┘

Received exception from server (version 22.11.1):
Code: 407. DB::Exception: Received from localhost:9000. DB::Exception: Decimal math overflow: While processing toDecimal64(-12.647987876, 9) AS a, toDecimal64(123.967645643, 9) AS b, a * b. (DECIMAL_OVERFLOW)
```


## divideDecimal(a, b[, result_scale])

Divides two Decimal values. The result has the [Decimal256](../../sql-reference/data-types/decimal.md) type.

The scale (the number of fractional digits) of the result can be set explicitly with the `result_scale` argument (an integer constant in the range `[0, 76]`).
If this argument is not specified, the scale of the result is the larger of the scales of the two arguments.

**Syntax**

```sql
divideDecimal(a, b[, result_scale])
```

:::note
This function is much slower than the regular `divide`.
If fixed precision is not required and/or fast computation is needed, use [divide](#divide) instead.
:::

**Arguments**

- `a` — First multiplier/dividend: [Decimal](../../sql-reference/data-types/decimal.md).
- `b` — Second multiplier/divisor: [Decimal](../../sql-reference/data-types/decimal.md).
- `result_scale` — Scale of the result: [Int/UInt](../../sql-reference/data-types/int-uint.md).

**Returned value**

- The result of the division with the given scale.

Type: [Decimal256](../../sql-reference/data-types/decimal.md).

**Examples**

```sql
SELECT divideDecimal(toDecimal256(-12, 0), toDecimal32(2.1, 1), 10);
```

```text
┌─divideDecimal(toDecimal256(-12, 0), toDecimal32(2.1, 1), 10)─┐
│ -5.7142857142 │
└──────────────────────────────────────────────────────────────┘
```

**Difference from standard functions**

```sql
SELECT toDecimal64(-12, 1) / toDecimal32(2.1, 1);
SELECT toDecimal64(-12, 1) as a, toDecimal32(2.1, 1) as b, divideDecimal(a, b, 1), divideDecimal(a, b, 5);
```

```text
┌─divide(toDecimal64(-12, 1), toDecimal32(2.1, 1))─┐
│ -5.7 │
└──────────────────────────────────────────────────┘

┌───a─┬───b─┬─divideDecimal(toDecimal64(-12, 1), toDecimal32(2.1, 1), 1)─┬─divideDecimal(toDecimal64(-12, 1), toDecimal32(2.1, 1), 5)─┐
│ -12 │ 2.1 │ -5.7 │ -5.71428 │
└─────┴─────┴────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────┘
```

```sql
SELECT toDecimal64(-12, 0) / toDecimal32(2.1, 1);
SELECT toDecimal64(-12, 0) as a, toDecimal32(2.1, 1) as b, divideDecimal(a, b, 1), divideDecimal(a, b, 5);
```

```text
DB::Exception: Decimal result's scale is less than argument's one: While processing toDecimal64(-12, 0) / toDecimal32(2.1, 1). (ARGUMENT_OUT_OF_BOUND)

┌───a─┬───b─┬─divideDecimal(toDecimal64(-12, 0), toDecimal32(2.1, 1), 1)─┬─divideDecimal(toDecimal64(-12, 0), toDecimal32(2.1, 1), 5)─┐
│ -12 │ 2.1 │ -5.7 │ -5.71428 │
└─────┴─────┴────────────────────────────────────────────────────────────┴────────────────────────────────────────────────────────────┘
```

@ -151,7 +151,7 @@ LIMIT 3;

``` sql
SELECT
    dictGet('ext-dict-mult', ('c1','c2'), number) AS val,
    dictGet('ext-dict-mult', ('c1','c2'), number + 1) AS val,
    toTypeName(val) AS type
FROM system.numbers
LIMIT 3;
@ -19,7 +19,7 @@ then
    # Will make a repository with website content as the only commit.
    git init
    git remote add origin "${GIT_PROD_URI}"
    git config user.email "robot-clickhouse@clickhouse.com"
    git config user.email "robot-clickhouse@users.noreply.github.com"
    git config user.name "robot-clickhouse"

    # Add files.
@ -74,7 +74,7 @@ Kafka 特性:

Consumed messages are tracked automatically, so each message is only counted once per consumer group. If you want to receive the data twice, create a copy of the table with a different group name.

Consumer groups are flexible and synchronized across the cluster. For example, if you have 10 topics and 5 replicas of a table in a cluster, each replica gets 2 topics. If the number of replicas changes, the topics are redistributed across the replicas automatically. Read more at http://kafka.apache.org/intro.
Consumer groups are flexible and synchronized across the cluster. For example, if you have 10 topics and 5 replicas of a table in a cluster, each replica gets 2 topics. If the number of replicas changes, the topics are redistributed across the replicas automatically. Read more at [http://kafka.apache.org/intro](http://kafka.apache.org/intro).

A `SELECT` query is not particularly useful for reading messages (except for debugging), because each message can be read only once. It is more practical to create real-time pipelines with materialized views. To do this:

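Here is a minimal sketch of that pattern; the table names, columns, broker address and topic below are placeholders:

```sql
-- A Kafka engine table acts as a consumer of the topic.
CREATE TABLE queue
(
    timestamp UInt64,
    level String,
    message String
) ENGINE = Kafka('localhost:9092', 'topic', 'group1', 'JSONEachRow');

-- A MergeTree-family table stores the data permanently.
CREATE TABLE daily
(
    day Date,
    level String,
    total UInt64
) ENGINE = SummingMergeTree ORDER BY (day, level);

-- The materialized view reads from the Kafka table in the background
-- and writes aggregated rows into the storage table.
CREATE MATERIALIZED VIEW consumer TO daily AS
SELECT toDate(toDateTime(timestamp)) AS day, level, count() AS total
FROM queue
GROUP BY day, level;
```
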
@ -164,7 +164,7 @@ SETTINGS index_granularity = 8192, index_granularity_bytes = 0;
<li><font face = "monospace">index_granularity</font>: explicitly set to its default value of 8192. This means that for each group of 8192 rows the primary index has one index entry; for example, if the table contains 16384 rows, the index has two index entries.
</li>
<br/>
<li><font face = "monospace">index_granularity_bytes</font>: set to 0 to disable <a href="https://clickhouse.com/docs/en/whats-new/changelog/2019/#experimental-features-1" target="_blank"><font color="blue">adaptive index granularity</font></a>. Adaptive index granularity means that ClickHouse automatically creates one index entry for a group of n rows
<ul>
<li>if n is less than 8192 but the combined row data size of those n rows is greater than or equal to 10 MB (the default value of index_granularity_bytes), or</li>
<li>if n reaches 8192</li>
@ -777,7 +777,7 @@ ClickHouse现在创建了一个额外的索引来存储—每组4个连续的颗
If we want to significantly speed up both of our sample queries (one filtering for rows with a specific UserID, one filtering for rows with a specific URL), then we need to use multiple primary indexes, via one of these three options:

- Creating a second table with a different primary key.
- Creating a materialized view.
- Adding a projection.

All three options effectively copy the sample data into an additional table in order to reorganize the table's primary index and row sort order; a sketch of the first option is shown after this list.
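
A minimal sketch of the first option, assuming the `hits_UserID_URL` example table used in this guide has the columns `UserID`, `URL` and `EventTime`; the new table name and the exact column types are placeholders:

```sql
-- A second table with the same data but a different primary key (URL first).
CREATE TABLE hits_URL_UserID
(
    UserID UInt32,
    URL String,
    EventTime DateTime
)
ENGINE = MergeTree
ORDER BY (URL, UserID);

-- Copy the existing rows into the new table.
INSERT INTO hits_URL_UserID SELECT * FROM hits_UserID_URL;
```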
@ -992,7 +992,7 @@ Ok.

:::note
- We switch the order of the key columns in the view's primary key (compared with the original table)
- The materialized view is backed by a hidden table whose row order and primary index are based on the given primary key definition
- We use the POPULATE keyword so that the new materialized view is immediately loaded with all 8.87 million rows from the source table hits_UserID_URL
- If new rows are inserted into the source table hits_UserID_URL, those rows are automatically inserted into the hidden table as well
- In effect, the implicitly created hidden table has the same row order and primary index as the secondary table that we created explicitly above:
@ -1082,7 +1082,7 @@ ALTER TABLE hits_UserID_URL
);
```

Materialize the projection:
```sql
ALTER TABLE hits_UserID_URL
    MATERIALIZE PROJECTION prj_url_userid;
@ -5,17 +5,17 @@ sidebar_label: LIMIT BY

# LIMIT BY Clause {#limit-by-clause}

A query with the `LIMIT n BY expressions` clause selects the first `n` rows for each distinct value of `expressions`. `LIMIT BY` can contain any number of [expressions](../../../sql-reference/syntax.md#syntax-expressions).

ClickHouse supports the following syntax variants:

- `LIMIT [offset_value, ]n BY expressions`
- `LIMIT n OFFSET offset_value BY expressions`

During query processing, ClickHouse first selects data ordered by the sorting key. The sorting key is set explicitly with an [ORDER BY](order-by.md#select-order-by) clause or implicitly as a property of the table engine (row order is only guaranteed when [ORDER BY](order-by.md#select-order-by) is used, otherwise the row blocks are not ordered due to multi-threading). Then ClickHouse applies `LIMIT n BY expressions`, grouping the rows by the values of `expressions` and returning the first `n` rows of each group. If `OFFSET` is specified, then for each group ClickHouse skips the first `offset_value` rows and then returns at most `n` rows. If `offset_value` is greater than the number of rows in a group, ClickHouse returns zero rows for that group.

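For a quick illustration of the `OFFSET` semantics, using the `limit_by` table from the examples below:

```sql
SELECT * FROM limit_by ORDER BY id, val LIMIT 2 BY id;           -- the first 2 rows for each id
SELECT * FROM limit_by ORDER BY id, val LIMIT 2 OFFSET 1 BY id;  -- per id: skip 1 row, then return up to 2 rows
```
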
!!! note "Note"
    `LIMIT BY` is not related to [LIMIT](../../../sql-reference/statements/select/limit.md). They can both be used in the same query.

## Examples {#examples}

@ -53,9 +53,9 @@ SELECT * FROM limit_by ORDER BY id, val LIMIT 1, 2 BY id
└────┴─────┘
```

The query `SELECT * FROM limit_by ORDER BY id, val LIMIT 2 OFFSET 1 BY id` returns the same result.

The following query returns the top 5 referrers for each `domain, device_type` pair, with a maximum of 100 rows in total (`LIMIT n BY + LIMIT`).

``` sql
SELECT
@ -3,6 +3,66 @@ slug: /zh/whats-new/security-changelog
sidebar_position: 76
sidebar_label: Security Changelog
---

# Security Changelog

## Fixed in ClickHouse 22.9.1.2603, 2022-09-22

### CVE-2022-44011

A heap buffer overflow issue was found in ClickHouse server. A malicious user with the ability to load data into ClickHouse server could crash the server by inserting a malformed CapnProto object.

The fix has been pushed to versions 22.9.1.2603, 22.8.2.11, 22.7.4.16, 22.6.6.16, 22.3.12.19.

Credits: Kiojj (independent researcher)

### CVE-2022-44010

A heap buffer overflow issue was found in ClickHouse server. An attacker could send a specially crafted HTTP request to the HTTP port (listening on port 8123 by default), causing a heap buffer overflow that crashes the ClickHouse server process. No authentication is required to perform this attack.

The fix has been pushed to versions 22.9.1.2603, 22.8.2.11, 22.7.4.16, 22.6.6.16, 22.3.12.19.

Credits: Kiojj (independent researcher)

## Fixed in ClickHouse 21.10.2.15, 2021-10-18

### CVE-2021-43304

Heap buffer overflow in ClickHouse's LZ4 compression codec when parsing a malicious query. The copy operations in the LZ4::decompressImpl loop, in particular the arbitrary copy wildCopy<copy_amount>(op, ip, copy_end), do not verify that they stay within the bounds of the destination buffer.

Credits: JFrog Security Research Team

### CVE-2021-43305

Heap buffer overflow in ClickHouse's LZ4 compression codec when parsing a malicious query. The copy operations in the LZ4::decompressImpl loop, in particular the arbitrary copy wildCopy<copy_amount>(op, ip, copy_end), do not verify that they stay within the bounds of the destination buffer.
This issue is very similar to CVE-2021-43304, but the unguarded copy operation occurs in a different wildCopy call.

Credits: JFrog Security Research Team

### CVE-2021-42387

When parsing a malicious query, ClickHouse's LZ4::decompressImpl loop reads a user-supplied 16-bit unsigned value ('offset') from the compressed data. The offset is later used as the length of a copy operation without checking the upper bound of the copy source.

Credits: JFrog Security Research Team

### CVE-2021-42388

When parsing a malicious query, ClickHouse's LZ4::decompressImpl loop reads a user-supplied 16-bit unsigned value ('offset') from the compressed data. The offset is later used as the length of a copy operation without checking the lower bound of the copy source.

Credits: JFrog Security Research Team

### CVE-2021-42389

Divide-by-zero in ClickHouse's Delta compression codec when parsing a malicious query. The first byte of the compressed buffer is used in a modulo operation without being checked for 0.

Credits: JFrog Security Research Team

### CVE-2021-42390

Divide-by-zero in ClickHouse's DeltaDouble compression codec when parsing a malicious query. The first byte of the compressed buffer is used in a modulo operation without being checked for 0.

Credits: JFrog Security Research Team

### CVE-2021-42391

Divide-by-zero in ClickHouse's Gorilla compression codec when parsing a malicious query. The first byte of the compressed buffer is used in a modulo operation without being checked for 0.

Credits: JFrog Security Research Team

## Fixed in ClickHouse 21.4.3.21, 2021-04-12

### CVE-2021-25263

An attacker with the CREATE DICTIONARY privilege could read arbitrary files outside of the permitted directories.

The fix has been pushed to versions 20.8.18.32-lts, 21.1.9.41-stable, 21.2.9.41-stable, 21.3.6.55-lts, 21.4.3.21-stable and earlier.

Credits: [Vyacheslav Egoshin](https://twitter.com/vegoshin)

## Fixed in ClickHouse Release 19.14.3.3, 2019-09-10 {#fixed-in-clickhouse-release-19-14-3-3-2019-09-10}

@ -15,18 +15,26 @@
|
|||||||
</openSSL>
|
</openSSL>
|
||||||
<!--
|
<!--
|
||||||
It's a custom prompt settings for the clickhouse-client
|
It's a custom prompt settings for the clickhouse-client
|
||||||
|
|
||||||
Possible macros:
|
Possible macros:
|
||||||
{host}
|
{host}
|
||||||
{port}
|
{port}
|
||||||
{user}
|
{user}
|
||||||
{display_name}
|
{display_name}
|
||||||
Terminal colors: https://misc.flogisoft.com/bash/tip_colors_and_formatting
|
|
||||||
See also: https://wiki.hackzine.org/development/misc/readline-color-prompt.html
|
You can also use colored prompt, like in [1].
|
||||||
|
|
||||||
|
[1]: https://misc.flogisoft.com/bash/tip_colors_and_formatting
|
||||||
|
|
||||||
|
But note, that ClickHouse does not use readline anymore, instead it uses
|
||||||
|
replxx. This means that you don't need the following:
|
||||||
|
- RL_PROMPT_START_IGNORE (\001)
|
||||||
|
- RL_PROMPT_END_IGNORE (\002)
|
||||||
-->
|
-->
|
||||||
<prompt_by_server_display_name>
|
<prompt_by_server_display_name>
|
||||||
<default>{display_name} :) </default>
|
<default>{display_name} :) </default>
|
||||||
<test>{display_name} \x01\e[1;32m\x02:)\x01\e[0m\x02 </test> <!-- if it matched to the substring "test" in the server display name - -->
|
<test>{display_name} \e[1;32m:)\e[0m </test> <!-- if it matched to the substring "test" in the server display name - -->
|
||||||
<production>{display_name} \x01\e[1;31m\x02:)\x01\e[0m\x02 </production> <!-- if it matched to the substring "production" in the server display name -->
|
<production>{display_name} \e[1;31m:)\e[0m </production> <!-- if it matched to the substring "production" in the server display name -->
|
||||||
</prompt_by_server_display_name>
|
</prompt_by_server_display_name>
|
||||||
|
|
||||||
<!--
|
<!--
|
||||||
|
@ -1142,7 +1142,7 @@ TaskStatus ClusterCopier::tryCreateDestinationTable(const ConnectionTimeouts & t
|
|||||||
InterpreterCreateQuery::prepareOnClusterQuery(create, getContext(), task_table.cluster_push_name);
|
InterpreterCreateQuery::prepareOnClusterQuery(create, getContext(), task_table.cluster_push_name);
|
||||||
String query = queryToString(create_query_push_ast);
|
String query = queryToString(create_query_push_ast);
|
||||||
|
|
||||||
LOG_INFO(log, "Create destination tables. Query: \n {}", query);
|
LOG_INFO(log, "Create destination tables. Query: {}", query);
|
||||||
UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, ClusterExecutionMode::ON_EACH_NODE);
|
UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, ClusterExecutionMode::ON_EACH_NODE);
|
||||||
LOG_INFO(
|
LOG_INFO(
|
||||||
log,
|
log,
|
||||||
@ -1413,7 +1413,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
|
|||||||
auto create_query_push_ast = rewriteCreateQueryStorage(create_query_ast, database_and_table_for_current_piece, new_engine_push_ast);
|
auto create_query_push_ast = rewriteCreateQueryStorage(create_query_ast, database_and_table_for_current_piece, new_engine_push_ast);
|
||||||
String query = queryToString(create_query_push_ast);
|
String query = queryToString(create_query_push_ast);
|
||||||
|
|
||||||
LOG_INFO(log, "Create destination tables. Query: \n {}", query);
|
LOG_INFO(log, "Create destination tables. Query: {}", query);
|
||||||
UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, ClusterExecutionMode::ON_EACH_NODE);
|
UInt64 shards = executeQueryOnCluster(task_table.cluster_push, query, task_cluster->settings_push, ClusterExecutionMode::ON_EACH_NODE);
|
||||||
LOG_INFO(
|
LOG_INFO(
|
||||||
log,
|
log,
|
||||||
@ -1517,7 +1517,7 @@ TaskStatus ClusterCopier::processPartitionPieceTaskImpl(
|
|||||||
// Select all fields
|
// Select all fields
|
||||||
ASTPtr query_select_ast = get_select_query(task_shard.table_read_shard, "*", /*enable_splitting*/ true, inject_fault ? "1" : "");
|
ASTPtr query_select_ast = get_select_query(task_shard.table_read_shard, "*", /*enable_splitting*/ true, inject_fault ? "1" : "");
|
||||||
|
|
||||||
LOG_INFO(log, "Executing SELECT query and pull from {} : {}", task_shard.getDescription(), queryToString(query_select_ast));
|
LOG_INFO(log, "Executing SELECT query and pull from {}: {}", task_shard.getDescription(), queryToString(query_select_ast));
|
||||||
|
|
||||||
ASTPtr query_insert_ast;
|
ASTPtr query_insert_ast;
|
||||||
{
|
{
|
||||||
@ -1871,7 +1871,7 @@ std::set<String> ClusterCopier::getShardPartitions(const ConnectionTimeouts & ti
|
|||||||
const auto & settings = getContext()->getSettingsRef();
|
const auto & settings = getContext()->getSettingsRef();
|
||||||
ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
|
ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
|
||||||
|
|
||||||
LOG_INFO(log, "Computing destination partition set, executing query: \n {}", query);
|
LOG_INFO(log, "Computing destination partition set, executing query: {}", query);
|
||||||
|
|
||||||
auto local_context = Context::createCopy(context);
|
auto local_context = Context::createCopy(context);
|
||||||
local_context->setSettings(task_cluster->settings_pull);
|
local_context->setSettings(task_cluster->settings_pull);
|
||||||
@ -1922,7 +1922,7 @@ bool ClusterCopier::checkShardHasPartition(const ConnectionTimeouts & timeouts,
|
|||||||
const auto & settings = getContext()->getSettingsRef();
|
const auto & settings = getContext()->getSettingsRef();
|
||||||
ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
|
ASTPtr query_ast = parseQuery(parser_query, query, settings.max_query_size, settings.max_parser_depth);
|
||||||
|
|
||||||
LOG_INFO(log, "Checking shard {} for partition {} existence, executing query: \n {}",
|
LOG_INFO(log, "Checking shard {} for partition {} existence, executing query: {}",
|
||||||
task_shard.getDescription(), partition_quoted_name, query_ast->formatForErrorMessage());
|
task_shard.getDescription(), partition_quoted_name, query_ast->formatForErrorMessage());
|
||||||
|
|
||||||
auto local_context = Context::createCopy(context);
|
auto local_context = Context::createCopy(context);
|
||||||
@ -1964,7 +1964,7 @@ bool ClusterCopier::checkPresentPartitionPiecesOnCurrentShard(const ConnectionTi
|
|||||||
|
|
||||||
query += " LIMIT 1";
|
query += " LIMIT 1";
|
||||||
|
|
||||||
LOG_INFO(log, "Checking shard {} for partition {} piece {} existence, executing query: \n \u001b[36m {}", task_shard.getDescription(), partition_quoted_name, std::to_string(current_piece_number), query);
|
LOG_INFO(log, "Checking shard {} for partition {} piece {} existence, executing query: {}", task_shard.getDescription(), partition_quoted_name, std::to_string(current_piece_number), query);
|
||||||
|
|
||||||
ParserQuery parser_query(query.data() + query.size());
|
ParserQuery parser_query(query.data() + query.size());
|
||||||
const auto & settings = getContext()->getSettingsRef();
|
const auto & settings = getContext()->getSettingsRef();
|
||||||
@ -2046,7 +2046,7 @@ UInt64 ClusterCopier::executeQueryOnCluster(
|
|||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
LOG_WARNING(log, "An error occurred while processing query : \n {}", query);
|
LOG_WARNING(log, "An error occurred while processing query: {}", query);
|
||||||
tryLogCurrentException(log);
|
tryLogCurrentException(log);
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
@ -18,28 +18,28 @@ require (
|
|||||||
github.com/spf13/cobra v1.3.0
|
github.com/spf13/cobra v1.3.0
|
||||||
github.com/spf13/pflag v1.0.5
|
github.com/spf13/pflag v1.0.5
|
||||||
github.com/spf13/viper v1.10.1
|
github.com/spf13/viper v1.10.1
|
||||||
github.com/stretchr/testify v1.7.0
|
github.com/stretchr/testify v1.8.0
|
||||||
github.com/testcontainers/testcontainers-go v0.12.0
|
github.com/testcontainers/testcontainers-go v0.15.0
|
||||||
github.com/yargevad/filepathx v1.0.0
|
github.com/yargevad/filepathx v1.0.0
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
)
|
)
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 // indirect
|
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||||
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 // indirect
|
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||||
github.com/Microsoft/hcsshim v0.8.16 // indirect
|
github.com/Microsoft/hcsshim v0.9.4 // indirect
|
||||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
|
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d // indirect
|
||||||
github.com/andybalholm/brotli v1.0.4 // indirect
|
github.com/andybalholm/brotli v1.0.4 // indirect
|
||||||
github.com/cenkalti/backoff v2.2.1+incompatible // indirect
|
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
|
||||||
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 // indirect
|
github.com/containerd/cgroups v1.0.4 // indirect
|
||||||
github.com/containerd/containerd v1.5.0-beta.4 // indirect
|
github.com/containerd/containerd v1.6.8 // indirect
|
||||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||||
github.com/docker/distribution v2.7.1+incompatible // indirect
|
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||||
github.com/docker/docker v20.10.11+incompatible // indirect
|
github.com/docker/docker v20.10.17+incompatible // indirect
|
||||||
github.com/docker/go-connections v0.4.0 // indirect
|
github.com/docker/go-connections v0.4.0 // indirect
|
||||||
github.com/docker/go-units v0.4.0 // indirect
|
github.com/docker/go-units v0.5.0 // indirect
|
||||||
github.com/dsnet/compress v0.0.1 // indirect
|
github.com/dsnet/compress v0.0.1 // indirect
|
||||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||||
github.com/ghodss/yaml v1.0.0 // indirect
|
github.com/ghodss/yaml v1.0.0 // indirect
|
||||||
github.com/go-ole/go-ole v1.2.4 // indirect
|
github.com/go-ole/go-ole v1.2.4 // indirect
|
||||||
github.com/gogo/protobuf v1.3.2 // indirect
|
github.com/gogo/protobuf v1.3.2 // indirect
|
||||||
@ -52,18 +52,18 @@ require (
|
|||||||
github.com/jaypipes/pcidb v0.6.0 // indirect
|
github.com/jaypipes/pcidb v0.6.0 // indirect
|
||||||
github.com/klauspost/compress v1.13.6 // indirect
|
github.com/klauspost/compress v1.13.6 // indirect
|
||||||
github.com/klauspost/pgzip v1.2.5 // indirect
|
github.com/klauspost/pgzip v1.2.5 // indirect
|
||||||
github.com/magiconair/properties v1.8.5 // indirect
|
github.com/magiconair/properties v1.8.6 // indirect
|
||||||
github.com/mattn/go-runewidth v0.0.9 // indirect
|
github.com/mattn/go-runewidth v0.0.9 // indirect
|
||||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||||
github.com/moby/sys/mount v0.2.0 // indirect
|
github.com/moby/sys/mount v0.3.3 // indirect
|
||||||
github.com/moby/sys/mountinfo v0.5.0 // indirect
|
github.com/moby/sys/mountinfo v0.6.2 // indirect
|
||||||
github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
|
github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
|
||||||
github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
|
github.com/morikuni/aec v1.0.0 // indirect
|
||||||
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
|
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 // indirect
|
||||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||||
github.com/opencontainers/image-spec v1.0.1 // indirect
|
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 // indirect
|
||||||
github.com/opencontainers/runc v1.0.2 // indirect
|
github.com/opencontainers/runc v1.1.3 // indirect
|
||||||
github.com/paulmach/orb v0.4.0 // indirect
|
github.com/paulmach/orb v0.4.0 // indirect
|
||||||
github.com/pelletier/go-toml v1.9.4 // indirect
|
github.com/pelletier/go-toml v1.9.4 // indirect
|
||||||
github.com/pierrec/lz4/v4 v4.1.14 // indirect
|
github.com/pierrec/lz4/v4 v4.1.14 // indirect
|
||||||
@ -79,12 +79,12 @@ require (
|
|||||||
go.opencensus.io v0.23.0 // indirect
|
go.opencensus.io v0.23.0 // indirect
|
||||||
go.opentelemetry.io/otel v1.4.1 // indirect
|
go.opentelemetry.io/otel v1.4.1 // indirect
|
||||||
go.opentelemetry.io/otel/trace v1.4.1 // indirect
|
go.opentelemetry.io/otel/trace v1.4.1 // indirect
|
||||||
golang.org/x/net v0.0.0-20211108170745-6635138e15ea // indirect
|
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 // indirect
|
||||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
|
||||||
golang.org/x/text v0.3.7 // indirect
|
golang.org/x/text v0.3.8 // indirect
|
||||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
|
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad // indirect
|
||||||
google.golang.org/grpc v1.43.0 // indirect
|
google.golang.org/grpc v1.47.0 // indirect
|
||||||
google.golang.org/protobuf v1.27.1 // indirect
|
google.golang.org/protobuf v1.28.0 // indirect
|
||||||
gopkg.in/ini.v1 v1.66.2 // indirect
|
gopkg.in/ini.v1 v1.66.2 // indirect
|
||||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
|
howett.net/plist v0.0.0-20181124034731-591f970eefbb // indirect
|
||||||
|
@ -51,8 +51,9 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
|
|||||||
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
|
||||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||||
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
|
|
||||||
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
|
||||||
|
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||||
|
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||||
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||||
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
|
||||||
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
|
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
|
||||||
@ -71,8 +72,6 @@ github.com/ClickHouse/clickhouse-go/v2 v2.0.12/go.mod h1:u4RoNQLLM2W6hNSPYrIESLJ
|
|||||||
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
|
||||||
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
|
||||||
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
|
||||||
github.com/Flaque/filet v0.0.0-20201012163910-45f684403088 h1:PnnQln5IGbhLeJOi6hVs+lCeF+B1dRfFKPGXUAez0Ww=
|
|
||||||
github.com/Flaque/filet v0.0.0-20201012163910-45f684403088/go.mod h1:TK+jB3mBs+8ZMWhU5BqZKnZWJ1MrLo8etNVg51ueTBo=
|
|
||||||
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
|
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
|
||||||
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||||
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
|
||||||
@ -80,21 +79,28 @@ github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jB
|
|||||||
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
|
||||||
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
github.com/Microsoft/go-winio v0.4.16-0.20201130162521-d1ffc52c7331/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
||||||
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
|
||||||
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3 h1:mw6pDQqv38/WGF1cO/jF5t/jyAJ2yi7CmtFLLO5tGFI=
|
|
||||||
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||||
|
github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||||
|
github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
|
||||||
|
github.com/Microsoft/go-winio v0.5.2 h1:a9IhgEQBCUEk6QCdml9CiJGhAws+YwffDHEMp1VMrpA=
|
||||||
|
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||||
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||||
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
|
||||||
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
|
github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
|
||||||
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
|
github.com/Microsoft/hcsshim v0.8.9/go.mod h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8=
|
||||||
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
|
github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
|
||||||
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
|
github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
|
||||||
github.com/Microsoft/hcsshim v0.8.16 h1:8/auA4LFIZFTGrqfKhGBSXwM6/4X1fHa/xniyEHu8ac=
|
|
||||||
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
|
github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
|
||||||
|
github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
|
||||||
|
github.com/Microsoft/hcsshim v0.9.4 h1:mnUj0ivWy6UzbB1uLFqKR6F+ZyiDc7j4iGgHTpO+5+I=
|
||||||
|
github.com/Microsoft/hcsshim v0.9.4/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
|
||||||
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
|
github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
|
||||||
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
|
github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
|
||||||
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
|
||||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||||
|
github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||||
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
|
||||||
|
github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||||
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
|
||||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
|
||||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
|
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
|
||||||
@ -108,6 +114,7 @@ github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY
|
|||||||
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig=
|
||||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||||
|
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
|
||||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||||
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
github.com/armon/go-metrics v0.3.10/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
|
||||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||||
@ -132,22 +139,28 @@ github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7
|
|||||||
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
|
github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
|
||||||
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
|
github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
|
||||||
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
|
||||||
github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4=
|
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||||
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
|
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
|
||||||
|
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
|
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
|
||||||
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
|
||||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
|
||||||
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
|
github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
|
||||||
|
github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
|
||||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||||
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
|
github.com/cilium/ebpf v0.0.0-20200110133405-4032b1d8aae3/go.mod h1:MA5e5Lr8slmEg9bt0VpxxWqJlO4iwu3FBdHUzV7wQVg=
|
||||||
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
|
github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
|
||||||
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
|
||||||
|
github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||||
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
|
||||||
|
github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
|
||||||
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
|
||||||
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
|
||||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||||
@ -166,20 +179,25 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z
|
|||||||
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
|
github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
|
||||||
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
|
github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
|
||||||
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
|
github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
|
||||||
|
github.com/containerd/aufs v1.0.0/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
|
||||||
github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
|
github.com/containerd/btrfs v0.0.0-20201111183144-404b9149801e/go.mod h1:jg2QkJcsabfHugurUvvPhS3E08Oxiuh5W/g1ybB4e0E=
|
||||||
github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
|
github.com/containerd/btrfs v0.0.0-20210316141732-918d888fb676/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
|
||||||
|
github.com/containerd/btrfs v1.0.0/go.mod h1:zMcX3qkXTAi9GI50+0HOeuV8LU2ryCE/V2vG/ZBiTss=
|
||||||
github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
|
github.com/containerd/cgroups v0.0.0-20190717030353-c4b9ac5c7601/go.mod h1:X9rLEHIqSf/wfK8NsPqxJmeZgW4pcfzdXITDrUSJ6uI=
|
||||||
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
|
github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko=
|
||||||
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
|
github.com/containerd/cgroups v0.0.0-20200531161412-0dbf7f05ba59/go.mod h1:pA0z1pT8KYB3TCXK/ocprsh7MAkoW8bZVzPdih9snmM=
|
||||||
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
||||||
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
|
||||||
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68 h1:hkGVFjz+plgr5UfxZUTPFbUFIF/Km6/s+RVRIRHLrrY=
|
|
||||||
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
|
||||||
|
github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
|
||||||
|
github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
|
||||||
|
github.com/containerd/cgroups v1.0.4/go.mod h1:nLNQtsF7Sl2HxNebu77i1R0oDlhiTG+kO4JTrUzo6IA=
|
||||||
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||||
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
|
||||||
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
|
github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
|
||||||
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
|
github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
|
||||||
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
|
github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
|
||||||
|
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
|
||||||
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||||
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||||
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
|
||||||
@@ -190,47 +208,68 @@ github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
-github.com/containerd/containerd v1.5.0-beta.4 h1:zjz4MOAOFgdBlwid2nNUlJ3YLpVi/97L36lfMYJex60=
github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
+github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
+github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.6.8 h1:h4dOFDwzHmqFEP754PgfgTeVXFnLiRc6kiqC7tplDJs=
+github.com/containerd/containerd v1.6.8/go.mod h1:By6p5KqPK0/7/CgO/A6t/Gz+CUYUu2zf1hUaaymVXB0=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cECdGN1O8G9bgKTlLhuPJimka6Xb/Gg7vYzCTNVxhvo=
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
-github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e h1:6JKvHHt396/qabvMhnhUZvWaHZzfVfldxE60TK8YLhg=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
+github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20201026212402-0724c46b320c/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
github.com/containerd/fifo v0.0.0-20210316144830-115abcc95a1d/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
+github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
+github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
github.com/containerd/go-runc v0.0.0-20201020171139-16b287bc67d0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
+github.com/containerd/go-runc v1.0.0/go.mod h1:cNU0ZbCgCQVZK4lgG3P+9tn9/PaJNmoDXPpoJhDR+Ok=
github.com/containerd/imgcrypt v1.0.1/go.mod h1:mdd8cEPW7TPgNG4FpuP3sGBiQ7Yi/zak9TYCG3juvb0=
github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
+github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
+github.com/containerd/typeurl v1.0.2/go.mod h1:9trJWW2sRlGub4wZJRTW83VtbOLS6hwcDZXTn6oPz9s=
github.com/containerd/zfs v0.0.0-20200918131355-0a33824f23a2/go.mod h1:8IgZOBdv8fAgXddBT4dBXJPtxyRsejFIpXoklgxgEjw=
github.com/containerd/zfs v0.0.0-20210301145711-11e8f1707f62/go.mod h1:A9zfAbMlQwE+/is6hi0Xw8ktpL+6glmqZYtevJgaB8Y=
github.com/containerd/zfs v0.0.0-20210315114300-dde8f0fda960/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v0.0.0-20210324211415-d5c4544f0433/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
+github.com/containerd/zfs v1.0.0/go.mod h1:m+m51S1DvAP6r3FcmYCp54bQ34pyOwTieQDNRIRHsFY=
github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
+github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
+github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
+github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -246,9 +285,11 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
@@ -259,21 +300,28 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.11+incompatible h1:OqzI/g/W54LczvhnccGqniFoQghHx3pklbLuhfXpqGo=
+github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
-github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
+github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
-github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
@@ -296,6 +344,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
+github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@@ -306,8 +355,9 @@ github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
-github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
+github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI=
+github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -327,24 +377,27 @@ github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbV
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
-github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
+github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
-github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
-github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
@@ -356,6 +409,7 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -405,8 +459,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
@@ -434,6 +489,7 @@ github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -445,14 +501,17 @@ github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
-github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I=
github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
+github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
+github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
+github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
@@ -496,6 +555,7 @@ github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJ
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
@@ -507,6 +567,7 @@ github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJS
github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -524,6 +585,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc=
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
@@ -536,16 +598,20 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
-github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo=
+github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.0/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs=
@@ -568,9 +634,11 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/mholt/archiver/v4 v4.0.0-alpha.4 h1:QJ4UuWgavPynEX3LXxClHDRGzYcgcvTtAMp8az7spuw=
github.com/mholt/archiver/v4 v4.0.0-alpha.4/go.mod h1:J7SYS/UTAtnO3I49RQEf+2FYZVwo7XBOh9Im43VrjNs=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@@ -589,44 +657,56 @@ github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGg
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/mkevac/debugcharts v0.0.0-20191222103121-ae1c48aa8615/go.mod h1:Ad7oeElCZqA1Ufj0U9/liOF4BtVepxRcTvr2ey7zTvM=
-github.com/moby/sys/mount v0.2.0 h1:WhCW5B355jtxndN5ovugJlMFJawbUODuW8fSnEH6SSM=
+github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
-github.com/moby/sys/mount v0.2.0/go.mod h1:aAivFE2LB3W4bACsUXChRHQ0qKWsetY4Y9V7sxOougM=
+github.com/moby/sys/mount v0.3.3 h1:fX1SVkXFJ47XWDoeFW4Sq7PdQJnV2QIDZAqjNqgEjUs=
+github.com/moby/sys/mount v0.3.3/go.mod h1:PBaEorSNTLG5t/+4EgukEQVlAvVEc6ZjTySwKdqp5K0=
github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
-github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI=
github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/mountinfo v0.6.2 h1:BzJjoreD5BMFNmD9Rus6gdd1pLuecOFPt8wC+Vygl78=
+github.com/moby/sys/mountinfo v0.6.2/go.mod h1:IJb6JQeOklcdMU9F5xQ8ZALD+CUr5VlGpwtX+VE0rpI=
github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
-github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c h1:nXxl5PrvVm2L/wCy8dQu6DMTwH4oIuGN8GJDAlqDdVE=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
-github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
+github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2 h1:e3mzJFJs4k83GXBEiTaQ5HgSc/kOK8q0rDaRO0MPaOk=
github.com/nwaples/rardecode/v2 v2.0.0-beta.2/go.mod h1:yntwv/HfMc/Hbvtq9I19D1n58te3h6KsqCf3GxyfBGY=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
+github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@ -634,15 +714,17 @@ github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.1.3 h1:vIXrkId+0/J2Ymu2m7VjGvbSlAId9XNRPhn2p4b+d8w=
github.com/opencontainers/runc v1.1.3/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@ -653,11 +735,14 @@ github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mo
github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/paulmach/orb v0.4.0 h1:ilp1MQjRapLJ1+qcays1nZpe0mvkCY+b8JU/qBKRZ1A=
github.com/paulmach/orb v0.4.0/go.mod h1:FkcWtplUAIVqAuhAOV2d3rpbnQyliDOjOcLW9dUrfdU=
github.com/paulmach/protoscan v0.2.1-0.20210522164731-4e53c6875432/go.mod h1:2sV+uZ/oQh66m4XJVZm5iqUZ62BN88Ex1E+TTS0nLzI=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
@ -678,6 +763,7 @@ github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSg
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.0.0-20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
@ -688,12 +774,15 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
@ -702,6 +791,7 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@ -714,8 +804,10 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb
github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/seccomp/libseccomp-golang v0.9.2-0.20220502022130-f33da4d89646/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
@ -734,17 +826,21 @@ github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1
github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/afero v1.8.0 h1:5MmtuhAgYeU6qpa7w7bP0dv6MBYuup0vekhSpSkoq60=
github.com/spf13/afero v1.8.0/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@ -754,6 +850,7 @@ github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
@ -762,27 +859,31 @@ github.com/stretchr/objx v0.0.0-20180129172003-8a3f7159479f/go.mod h1:HFkY916IF+
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v0.0.0-20180303142811-b89eecf5ca5d/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tchap/go-patricia v2.2.6+incompatible/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I=
github.com/testcontainers/testcontainers-go v0.12.0 h1:SK0NryGHIx7aifF6YqReORL18aGAA4bsDPtikDVCEyg=
github.com/testcontainers/testcontainers-go v0.15.0 h1:3Ex7PUGFv0b2bBsdOv6R42+SK2qoZnWBd21LvZYhUtQ=
github.com/testcontainers/testcontainers-go v0.12.0/go.mod h1:SIndOQXZng0IW8iWU1Js0ynrfZ8xcxrTtDfF6rD2pxs=
github.com/testcontainers/testcontainers-go v0.15.0/go.mod h1:PkohMRH2X8Hib0IWtifVexDfLPVT+tb5E9hsf7cW12w=
github.com/therootcompany/xz v1.0.1 h1:CmOtsn1CbtmyYiusbfmhmkpAAETj0wBIH6kCYaX+xzw=
github.com/therootcompany/xz v1.0.1/go.mod h1:3K3UH1yCKgBneZYhuQUvJ9HPD19UEXEI0BWbMn8qNMY=
github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
github.com/ulikunitz/xz v0.5.10 h1:t92gobL9l3HE202wg3rlk19F6X+JOxl9BBrCCMYEYd8=
github.com/ulikunitz/xz v0.5.10/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14=
@ -792,14 +893,17 @@ github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtX
github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI=
github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU=
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yargevad/filepathx v1.0.0 h1:SYcT+N3tYGi+NvazubCNlvgIPbzAk7i7y2dwg3I5FYc=
github.com/yargevad/filepathx v1.0.0/go.mod h1:BprfX/gpYNJHJfc35GjRRpVcwWXS89gGulUIU5tK3tA=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@ -811,8 +915,10 @@ github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.etcd.io/etcd/api/v3 v3.5.1/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.1/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
@ -853,6 +959,7 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
@ -931,6 +1038,7 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
@ -944,8 +1052,9 @@ golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211108170745-6635138e15ea h1:FosBMXtOc8Tp9Hbo4ltl1WJSrTVewZU8MPnTPY2HdH8=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211108170745-6635138e15ea/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9 h1:Yqz/iviulwKwAREEeUd3nbBFn0XuyJqkoft2IlrvOhc=
golang.org/x/net v0.0.0-20220617184016-355a448f1bc9/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -996,10 +1105,12 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1022,6 +1133,7 @@ golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1035,13 +1147,14 @@ golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200817155316-9781c653f443/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1070,16 +1183,20 @@ golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211109184856-51b60fd695b3/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211110154304-99a53858aa08/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1089,16 +1206,19 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8 h1:nAL+RVCQ9uMn3vJZbV+MRnydTJFPf8qqY42YiA6MrqY=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
@ -1114,6 +1234,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@ -1137,14 +1258,17 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@ -1161,7 +1285,6 @@ golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
|
|||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
|
||||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||||
@ -1232,6 +1355,7 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG
|
|||||||
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
|
google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||||
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
@ -1271,8 +1395,9 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6
|
|||||||
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0=
|
|
||||||
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
|
||||||
|
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad h1:kqrS+lhvaMHCxul6sKQvKJ8nAAhlVItmZV822hYFH/U=
|
||||||
|
google.golang.org/genproto v0.0.0-20220617124728-180714bec0ad/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||||
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
@ -1304,8 +1429,8 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
|
|||||||
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||||
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||||
google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
|
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
|
||||||
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
|
||||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
@ -1319,15 +1444,17 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
|
|||||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
|
|
||||||
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||||
|
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
|
||||||
|
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||||
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
|
||||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
|
||||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
|
||||||
|
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
|
||||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||||
@ -1352,13 +1479,14 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
|||||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
|
||||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||||
|
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||||
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
|
||||||
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
|
||||||
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
|
||||||
gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
|
|
||||||
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
|
||||||
|
gotest.tools/v3 v3.4.0 h1:ZazjZUfuVeZGLAmlKKuyv3IKP5orXcwtOwDQH6YVr6o=
|
||||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||||
@ -1369,15 +1497,32 @@ honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
|
|||||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
|
howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M=
|
||||||
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
|
howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0=
|
||||||
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
|
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
|
||||||
|
k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
|
||||||
|
k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
|
||||||
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||||
|
k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||||
|
k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
|
||||||
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
|
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
|
||||||
|
k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
|
||||||
|
k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
|
||||||
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
|
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
|
||||||
|
k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
|
||||||
|
k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
|
||||||
|
k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
|
||||||
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
|
k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
|
||||||
|
k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
|
||||||
|
k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
|
||||||
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
|
k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
|
||||||
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
|
k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
|
||||||
|
k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
|
||||||
|
k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
|
||||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||||
|
k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||||
|
k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||||
|
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||||
|
k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
|
||||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||||
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
|
||||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||||
@ -1385,6 +1530,9 @@ rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8
|
|||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||||
|
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||||
|
sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||||
|
sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||||
|
@ -7,23 +7,22 @@ import (
"fmt"
"os"
"path"
"strconv"
"testing"

"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/data"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/database"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/docker/go-connections/nat"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
)

func TestMain(m *testing.M) {
func createClickHouseContainer(t *testing.T, ctx context.Context) (testcontainers.Container, nat.Port) {
// create a ClickHouse container
ctx := context.Background()
cwd, err := os.Getwd()
if err != nil {
// can't test without container
// can't test without current directory
panic(err)
}

@ -32,9 +31,19 @@ func TestMain(m *testing.M) {
Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()),
ExposedPorts: []string{"9000/tcp"},
WaitingFor: wait.ForLog("Ready for connections"),
BindMounts: map[string]string{
Mounts: testcontainers.ContainerMounts{
"/etc/clickhouse-server/config.d/custom.xml": path.Join(cwd, "../../../testdata/docker/custom.xml"),
{
"/etc/clickhouse-server/users.d/admin.xml": path.Join(cwd, "../../../testdata/docker/admin.xml"),
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../../../testdata/docker/custom.xml"),
},
Target: "/etc/clickhouse-server/config.d/custom.xml",
},
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../../../testdata/docker/admin.xml"),
},
Target: "/etc/clickhouse-server/users.d/admin.xml",
},
},
}
clickhouseContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
@ -47,17 +56,17 @@ func TestMain(m *testing.M) {
}

p, _ := clickhouseContainer.MappedPort(ctx, "9000")
if err != nil {
// can't test without container's port
panic(err)
}

os.Setenv("CLICKHOUSE_DB_PORT", p.Port())
t.Setenv("CLICKHOUSE_DB_PORT", p.Port())
defer clickhouseContainer.Terminate(ctx) //nolint
os.Exit(m.Run())
return clickhouseContainer, p
}

func getClient(t *testing.T) *database.ClickhouseNativeClient {
func getClient(t *testing.T, mappedPort int) *database.ClickhouseNativeClient {
mappedPort, err := strconv.Atoi(os.Getenv("CLICKHOUSE_DB_PORT"))
if err != nil {
t.Fatal("Unable to read port value from environment")
}
clickhouseClient, err := database.NewNativeClient("localhost", uint16(mappedPort), "", "")
if err != nil {
t.Fatalf("unable to build client : %v", err)
@ -66,7 +75,11 @@ func getClient(t *testing.T) *database.ClickhouseNativeClient {
}

func TestReadTableNamesForDatabase(t *testing.T) {
clickhouseClient := getClient(t)
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint

clickhouseClient := getClient(t, mappedPort.Int())
t.Run("client can read tables for a database", func(t *testing.T) {
tables, err := clickhouseClient.ReadTableNamesForDatabase("system")
require.Nil(t, err)
@ -76,12 +89,17 @@ func TestReadTableNamesForDatabase(t *testing.T) {
}

func TestReadTable(t *testing.T) {
clickhouseClient := getClient(t)
t.Run("client can get all rows for system.disks table", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint

clickhouseClient := getClient(t, mappedPort.Int())

// we read the table system.disks as this should contain only 1 row
frame, err := clickhouseClient.ReadTable("system", "disks", []string{}, data.OrderBy{}, 10)
require.Nil(t, err)
require.ElementsMatch(t, frame.Columns(), [7]string{"name", "path", "free_space", "total_space", "keep_free_space", "type", "cache_path"})
require.ElementsMatch(t, frame.Columns(), [9]string{"name", "path", "free_space", "total_space", "unreserved_space", "keep_free_space", "type", "is_encrypted", "cache_path"})
i := 0
for {
values, ok, err := frame.Next()
@ -92,8 +110,11 @@ func TestReadTable(t *testing.T) {
require.Equal(t, "/var/lib/clickhouse/", values[1])
require.Greater(t, values[2], uint64(0))
require.Greater(t, values[3], uint64(0))
require.Equal(t, values[4], uint64(0))
require.Greater(t, values[4], uint64(0))
require.Equal(t, "local", values[5])
require.Equal(t, values[5], uint64(0))
require.Equal(t, "local", values[6])
require.Equal(t, values[7], uint8(0))
require.Equal(t, values[8], "")
} else {
require.False(t, ok)
break
@ -103,6 +124,12 @@ func TestReadTable(t *testing.T) {
})

t.Run("client can get all rows for system.databases table", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint

clickhouseClient := getClient(t, mappedPort.Int())

// we read the table system.databases as this should be small and consistent on fresh db instances
frame, err := clickhouseClient.ReadTable("system", "databases", []string{}, data.OrderBy{}, 10)
require.Nil(t, err)
@ -133,12 +160,24 @@ func TestReadTable(t *testing.T) {
})

t.Run("client can get all rows for system.databases table with except", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint

clickhouseClient := getClient(t, mappedPort.Int())

frame, err := clickhouseClient.ReadTable("system", "databases", []string{"data_path", "comment"}, data.OrderBy{}, 10)
require.Nil(t, err)
require.ElementsMatch(t, frame.Columns(), [4]string{"name", "engine", "metadata_path", "uuid"})
})

t.Run("client can limit rows for system.databases", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint

clickhouseClient := getClient(t, mappedPort.Int())

frame, err := clickhouseClient.ReadTable("system", "databases", []string{}, data.OrderBy{}, 1)
require.Nil(t, err)
require.ElementsMatch(t, frame.Columns(), [6]string{"name", "engine", "data_path", "metadata_path", "uuid", "comment"})
@ -164,6 +203,12 @@ func TestReadTable(t *testing.T) {
})

t.Run("client can order rows for system.databases", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint

clickhouseClient := getClient(t, mappedPort.Int())

frame, err := clickhouseClient.ReadTable("system", "databases", []string{}, data.OrderBy{
Column: "engine",
Order: data.Asc,
@ -199,8 +244,13 @@ func TestReadTable(t *testing.T) {
}

func TestExecuteStatement(t *testing.T) {
clickhouseClient := getClient(t)
t.Run("client can execute any statement", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint

clickhouseClient := getClient(t, mappedPort.Int())

statement := "SELECT path, count(*) as count FROM system.disks GROUP BY path;"
frame, err := clickhouseClient.ExecuteStatement("engines", statement)
require.Nil(t, err)
@ -225,8 +275,13 @@ func TestExecuteStatement(t *testing.T) {
}

func TestVersion(t *testing.T) {
clickhouseClient := getClient(t)
t.Run("client can read version", func(t *testing.T) {
ctx := context.Background()
clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint

clickhouseClient := getClient(t, mappedPort.Int())

version, err := clickhouseClient.Version()
require.Nil(t, err)
require.NotEmpty(t, version)
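The tests above replace the shared TestMain container and the removed BindMounts map with per-test containers built through the Mounts / GenericBindMountSource API of testcontainers-go. A minimal sketch of that request shape, assuming a reachable Docker daemon and a testcontainers-go release that exposes ContainerMounts; the image tag and host path below are placeholders, not values from this commit:

package example_test

import (
    "context"
    "testing"

    "github.com/testcontainers/testcontainers-go"
    "github.com/testcontainers/testcontainers-go/wait"
)

func TestWithBindMount(t *testing.T) {
    ctx := context.Background()
    req := testcontainers.ContainerRequest{
        Image:        "clickhouse/clickhouse-server:latest", // placeholder tag
        ExposedPorts: []string{"9000/tcp"},
        WaitingFor:   wait.ForLog("Ready for connections"),
        // Mounts replaces the old BindMounts map: each entry pairs a host path with a container target.
        Mounts: testcontainers.ContainerMounts{
            {
                Source: testcontainers.GenericBindMountSource{HostPath: "/tmp/custom.xml"}, // placeholder host path
                Target: "/etc/clickhouse-server/config.d/custom.xml",
            },
        },
    }
    c, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ContainerRequest: req, Started: true})
    if err != nil {
        t.Fatal(err)
    }
    defer c.Terminate(ctx) //nolint
}

Creating the container inside each test keeps its lifetime scoped to that test, at the cost of starting a fresh server per test function.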
@ -7,19 +7,18 @@ import (
"fmt"
"os"
"path"
"strconv"
"testing"

"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform"
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
"github.com/docker/go-connections/nat"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/wait"
)

func TestMain(m *testing.M) {
// create a ClickHouse container
// create a ClickHouse container
func createClickHouseContainer(t *testing.T, ctx context.Context) (testcontainers.Container, nat.Port) {
ctx := context.Background()
cwd, err := os.Getwd()
if err != nil {
fmt.Println("unable to read current directory", err)
@ -30,9 +29,19 @@ func TestMain(m *testing.M) {
Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()),
ExposedPorts: []string{"9000/tcp"},
WaitingFor: wait.ForLog("Ready for connections"),
BindMounts: map[string]string{
Mounts: testcontainers.ContainerMounts{
"/etc/clickhouse-server/config.d/custom.xml": path.Join(cwd, "../../testdata/docker/custom.xml"),
{
"/etc/clickhouse-server/users.d/admin.xml": path.Join(cwd, "../../testdata/docker/admin.xml"),
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../../testdata/docker/custom.xml"),
},
Target: "/etc/clickhouse-server/config.d/custom.xml",
},
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../../testdata/docker/admin.xml"),
},
Target: "/etc/clickhouse-server/users.d/admin.xml",
},
},
}
clickhouseContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
@ -44,29 +53,35 @@ func TestMain(m *testing.M) {
panic(err)
}

p, _ := clickhouseContainer.MappedPort(ctx, "9000")
p, err := clickhouseContainer.MappedPort(ctx, "9000")
if err != nil {
// can't test without a port
panic(err)
}

os.Setenv("CLICKHOUSE_DB_PORT", p.Port())
return clickhouseContainer, p

defer clickhouseContainer.Terminate(ctx) //nolint
os.Exit(m.Run())
}

func TestConnect(t *testing.T) {
mappedPort, err := strconv.Atoi(os.Getenv("CLICKHOUSE_DB_PORT"))
if err != nil {
t.Fatal("Unable to read port value from environment")
}
t.Run("can only connect once", func(t *testing.T) {
ctx := context.Background()

clickhouseContainer, mappedPort := createClickHouseContainer(t, ctx)
defer clickhouseContainer.Terminate(ctx) //nolint

t.Setenv("CLICKHOUSE_DB_PORT", mappedPort.Port())

port := mappedPort.Int()

// get before connection
manager := platform.GetResourceManager()
require.Nil(t, manager.DbClient)
// init connection
err = manager.Connect("localhost", uint16(mappedPort), "", "")
err := manager.Connect("localhost", uint16(port), "", "")
require.Nil(t, err)
require.NotNil(t, manager.DbClient)
// try and re-fetch connection
err = manager.Connect("localhost", uint16(mappedPort), "", "")
err = manager.Connect("localhost", uint16(port), "", "")
require.NotNil(t, err)
require.Equal(t, "connect can only be called once", err.Error())
})
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"io"
|
||||||
"os"
|
"os"
|
||||||
"path"
|
"path"
|
||||||
|
"strings"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
|
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/test"
|
||||||
"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal/platform/utils"
|
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
"github.com/testcontainers/testcontainers-go"
|
"github.com/testcontainers/testcontainers-go"
|
||||||
"github.com/testcontainers/testcontainers-go/wait"
|
"github.com/testcontainers/testcontainers-go/wait"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestMain(m *testing.M) {
|
func getProcessesInContainer(t *testing.T, container testcontainers.Container) ([]string, error) {
|
||||||
// create a ClickHouse container
|
result, reader, err := container.Exec(context.Background(), []string{"ps", "-aux"})
|
||||||
ctx := context.Background()
|
|
||||||
cwd, err := os.Getwd()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Println("unable to read current directory", err)
|
return nil, err
|
||||||
os.Exit(1)
|
|
||||||
}
|
}
|
||||||
// for now, we test against a hardcoded database-server version but we should make this a property
|
require.Zero(t, result)
|
||||||
req := testcontainers.ContainerRequest{
|
require.NotNil(t, reader)
|
||||||
Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()),
|
|
||||||
ExposedPorts: []string{"9000/tcp"},
|
b, err := io.ReadAll(reader)
|
||||||
WaitingFor: wait.ForLog("Ready for connections"),
|
|
||||||
BindMounts: map[string]string{
|
|
||||||
"/etc/clickhouse-server/config.d/custom.xml": path.Join(cwd, "../../../testdata/docker/custom.xml"),
|
|
||||||
},
|
|
||||||
}
|
|
||||||
clickhouseContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
|
|
||||||
ContainerRequest: req,
|
|
||||||
Started: true,
|
|
||||||
})
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// can't test without container
|
return nil, err
|
||||||
panic(err)
|
|
||||||
}
|
}
|
||||||
|
require.NotNil(t, b)
|
||||||
|
|
||||||
p, _ := clickhouseContainer.MappedPort(ctx, "9000")
|
lines := strings.Split(string(b), "\n")
|
||||||
|
|
||||||
os.Setenv("CLICKHOUSE_DB_PORT", p.Port())
|
// discard PS header
|
||||||
|
return lines[1:], nil
|
||||||
defer clickhouseContainer.Terminate(ctx) //nolint
|
|
||||||
os.Exit(m.Run())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestFindClickHouseProcesses(t *testing.T) {
|
func TestFindClickHouseProcessesAndConfigs(t *testing.T) {
|
||||||
|
|
||||||
t.Run("can find ClickHouse processes", func(t *testing.T) {
|
t.Run("can find ClickHouse processes and configs", func(t *testing.T) {
|
||||||
processes, err := utils.FindClickHouseProcesses()
|
// create a ClickHouse container
|
||||||
|
ctx := context.Background()
|
||||||
|
cwd, err := os.Getwd()
|
||||||
|
if err != nil {
|
||||||
|
fmt.Println("unable to read current directory", err)
|
||||||
|
os.Exit(1)
|
||||||
|
}
|
||||||
|
|
||||||
|
// run a ClickHouse container that guarantees that it runs only for the duration of the test
|
||||||
|
req := testcontainers.ContainerRequest{
|
||||||
|
Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()),
|
||||||
|
ExposedPorts: []string{"9000/tcp"},
|
||||||
|
WaitingFor: wait.ForLog("Ready for connections"),
|
||||||
|
Mounts: testcontainers.ContainerMounts{
|
||||||
|
{
|
||||||
|
Source: testcontainers.GenericBindMountSource{
|
||||||
|
HostPath: path.Join(cwd, "../../../testdata/docker/custom.xml"),
|
||||||
|
},
|
||||||
|
Target: "/etc/clickhouse-server/config.d/custom.xml",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
clickhouseContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
|
||||||
|
ContainerRequest: req,
|
||||||
|
Started: true,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
// can't test without container
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
p, _ := clickhouseContainer.MappedPort(ctx, "9000")
|
||||||
|
|
||||||
|
t.Setenv("CLICKHOUSE_DB_PORT", p.Port())
|
||||||
|
|
||||||
|
defer clickhouseContainer.Terminate(ctx) //nolint
|
||||||
|
|
||||||
|
lines, err := getProcessesInContainer(t, clickhouseContainer)
|
||||||
require.Nil(t, err)
|
require.Nil(t, err)
|
||||||
// we might have clickhouse running locally during development as well as the above container so we allow 1 or more
|
require.NotEmpty(t, lines)
|
||||||
require.GreaterOrEqual(t, len(processes), 1)
|
|
||||||
require.Equal(t, processes[0].List[0], "/usr/bin/clickhouse-server")
|
for _, line := range lines {
|
||||||
// flexible as services/containers pass the config differently
|
parts := strings.Fields(line)
|
||||||
require.Contains(t, processes[0].List[1], "/etc/clickhouse-server/config.xml")
|
if len(parts) < 11 {
|
||||||
})
|
continue
|
||||||
}
|
}
|
||||||
|
if !strings.Contains(parts[10], "clickhouse-server") {
|
||||||
func TestFindConfigsFromClickHouseProcesses(t *testing.T) {
|
continue
|
||||||
|
}
|
||||||
t.Run("can find ClickHouse configs", func(t *testing.T) {
|
|
||||||
configs, err := utils.FindConfigsFromClickHouseProcesses()
|
require.Equal(t, "/usr/bin/clickhouse-server", parts[10])
|
||||||
require.Nil(t, err)
|
require.Equal(t, "--config-file=/etc/clickhouse-server/config.xml", parts[11])
|
||||||
require.GreaterOrEqual(t, len(configs), 1)
|
}
|
||||||
require.Equal(t, configs[0], "/etc/clickhouse-server/config.xml")
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
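The process check above now runs ps -aux inside the container through testcontainers' Exec, which in the version used here returns an exit code, a reader over the command output, and an error. A rough sketch of that pattern, assuming a started container whose image ships ps and whose COMMAND column sits at field index 10 of the ps -aux output:

package example_test

import (
    "context"
    "io"
    "strings"
    "testing"

    "github.com/testcontainers/testcontainers-go"
)

// clickhouseCommandLines returns the ps lines whose command column mentions clickhouse-server.
func clickhouseCommandLines(t *testing.T, c testcontainers.Container) []string {
    t.Helper()
    code, reader, err := c.Exec(context.Background(), []string{"ps", "-aux"})
    if err != nil || code != 0 {
        t.Fatalf("ps -aux failed: code=%d err=%v", code, err)
    }
    out, err := io.ReadAll(reader)
    if err != nil {
        t.Fatal(err)
    }
    var matched []string
    for _, line := range strings.Split(string(out), "\n")[1:] { // skip the ps header row
        parts := strings.Fields(line)
        if len(parts) > 10 && strings.Contains(parts[10], "clickhouse-server") {
            matched = append(matched, line)
        }
    }
    return matched
}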
@ -8,7 +8,6 @@ import (
"io/ioutil"
"os"
"path"
"strconv"
"testing"

"github.com/ClickHouse/ClickHouse/programs/diagnostics/internal"
@ -25,7 +24,8 @@ import (
"github.com/testcontainers/testcontainers-go/wait"
)

func TestMain(m *testing.M) {
// Execute a full default capture, with simple output, and check if a bundle is produced and it's not empty
func TestCapture(t *testing.T) {
// create a ClickHouse container
ctx := context.Background()
cwd, err := os.Getwd()
@ -39,9 +39,19 @@ func TestMain(m *testing.M) {
Image: fmt.Sprintf("clickhouse/clickhouse-server:%s", test.GetClickHouseTestVersion()),
ExposedPorts: []string{"9000/tcp"},
WaitingFor: wait.ForLog("Ready for connections"),
BindMounts: map[string]string{
Mounts: testcontainers.ContainerMounts{
"/etc/clickhouse-server/config.d/custom.xml": path.Join(cwd, "../testdata/docker/custom.xml"),
{
"/etc/clickhouse-server/users.d/admin.xml": path.Join(cwd, "../testdata/docker/admin.xml"),
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../testdata/docker/custom.xml"),
},
Target: "/etc/clickhouse-server/config.d/custom.xml",
},
{
Source: testcontainers.GenericBindMountSource{
HostPath: path.Join(cwd, "../testdata/docker/admin.xml"),
},
Target: "/etc/clickhouse-server/users.d/admin.xml",
},
},
}
clickhouseContainer, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
@ -55,18 +65,12 @@ func TestMain(m *testing.M) {

p, _ := clickhouseContainer.MappedPort(ctx, "9000")

os.Setenv("CLICKHOUSE_DB_PORT", p.Port())
t.Setenv("CLICKHOUSE_DB_PORT", p.Port())
defer clickhouseContainer.Terminate(ctx) //nolint
os.Exit(m.Run())
}

// Execute a full default capture, with simple output, and check if a bundle is produced and it's not empty
func TestCapture(t *testing.T) {
tmrDir := t.TempDir()
port, err := strconv.ParseUint(os.Getenv("CLICKHOUSE_DB_PORT"), 10, 16)
port := p.Int()
if err != nil {
t.Fatal("Unable to read port value from environment")
}
// test a simple output exists
_, err = outputs.GetOutputByName("simple")
require.Nil(t, err)
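TestCapture above now takes the port straight from the nat.Port value returned by MappedPort instead of re-parsing CLICKHOUSE_DB_PORT with strconv. The two accessors used across these tests come from github.com/docker/go-connections/nat; a tiny illustration with a placeholder value:

package example_test

import (
    "fmt"

    "github.com/docker/go-connections/nat"
)

func Example_mappedPort() {
    p := nat.Port("49153/tcp") // placeholder; the tests obtain this from MappedPort(ctx, "9000")
    fmt.Println(p.Port())      // string form, handed to t.Setenv
    fmt.Println(p.Int())       // numeric form, handed to NewNativeClient / Connect
    // Output:
    // 49153
    // 49153
}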
@ -888,7 +888,7 @@ int mainEntryClickHouseInstall(int argc, char ** argv)

namespace
{
int start(const std::string & user, const fs::path & executable, const fs::path & config, const fs::path & pid_file)
int start(const std::string & user, const fs::path & executable, const fs::path & config, const fs::path & pid_file, unsigned max_tries)
{
if (fs::exists(pid_file))
{
@ -939,8 +939,7 @@ namespace
/// Wait to start.

size_t try_num = 0;
constexpr size_t num_tries = 60;
for (; try_num < max_tries; ++try_num)
for (; try_num < num_tries; ++try_num)
{
fmt::print("Waiting for server to start\n");
if (fs::exists(pid_file))
@ -951,7 +950,7 @@ namespace
sleepForSeconds(1);
}

if (try_num == num_tries)
if (try_num == max_tries)
{
fmt::print("Cannot start server. You can execute {} without --daemon option to run manually.\n", command);

@ -1052,7 +1051,7 @@ namespace
return pid;
}

int stop(const fs::path & pid_file, bool force, bool do_not_kill)
int stop(const fs::path & pid_file, bool force, bool do_not_kill, unsigned max_tries)
{
if (force && do_not_kill)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Specified flags are incompatible");
@ -1071,8 +1070,7 @@ namespace
throwFromErrno(fmt::format("Cannot send {} signal", signal_name), ErrorCodes::SYSTEM_ERROR);

size_t try_num = 0;
constexpr size_t num_tries = 60;
for (; try_num < max_tries; ++try_num)
for (; try_num < num_tries; ++try_num)
{
fmt::print("Waiting for server to stop\n");
if (!isRunning(pid_file))
@ -1083,7 +1081,7 @@ namespace
sleepForSeconds(1);
}

if (try_num == num_tries)
if (try_num == max_tries)
{
if (do_not_kill)
{
@ -1136,6 +1134,7 @@ int mainEntryClickHouseStart(int argc, char ** argv)
("config-path", po::value<std::string>()->default_value("etc/clickhouse-server"), "directory with configs")
("pid-path", po::value<std::string>()->default_value("var/run/clickhouse-server"), "directory for pid file")
("user", po::value<std::string>()->default_value(DEFAULT_CLICKHOUSE_SERVER_USER), "clickhouse user")
("max-tries", po::value<unsigned>()->default_value(60), "Max number of tries for waiting the server (with 1 second delay)")
;

po::variables_map options;
@ -1153,8 +1152,9 @@ int mainEntryClickHouseStart(int argc, char ** argv)
fs::path executable = prefix / options["binary-path"].as<std::string>() / "clickhouse-server";
fs::path config = prefix / options["config-path"].as<std::string>() / "config.xml";
fs::path pid_file = prefix / options["pid-path"].as<std::string>() / "clickhouse-server.pid";
unsigned max_tries = options["max-tries"].as<unsigned>();

return start(user, executable, config, pid_file);
return start(user, executable, config, pid_file, max_tries);
}
catch (...)
{
@ -1175,6 +1175,7 @@ int mainEntryClickHouseStop(int argc, char ** argv)
("pid-path", po::value<std::string>()->default_value("var/run/clickhouse-server"), "directory for pid file")
("force", po::bool_switch(), "Stop with KILL signal instead of TERM")
("do-not-kill", po::bool_switch(), "Do not send KILL even if TERM did not help")
("max-tries", po::value<unsigned>()->default_value(60), "Max number of tries for waiting the server to finish after sending TERM (with 1 second delay)")
;

po::variables_map options;
@ -1191,7 +1192,8 @@ int mainEntryClickHouseStop(int argc, char ** argv)

bool force = options["force"].as<bool>();
bool do_not_kill = options["do-not-kill"].as<bool>();
return stop(pid_file, force, do_not_kill);
unsigned max_tries = options["max-tries"].as<unsigned>();
return stop(pid_file, force, do_not_kill, max_tries);
}
catch (...)
{
@ -1250,6 +1252,7 @@ int mainEntryClickHouseRestart(int argc, char ** argv)
("user", po::value<std::string>()->default_value(DEFAULT_CLICKHOUSE_SERVER_USER), "clickhouse user")
("force", po::value<bool>()->default_value(false), "Stop with KILL signal instead of TERM")
("do-not-kill", po::bool_switch(), "Do not send KILL even if TERM did not help")
("max-tries", po::value<unsigned>()->default_value(60), "Max number of tries for waiting the server (with 1 second delay)")
;

po::variables_map options;
@ -1270,10 +1273,11 @@ int mainEntryClickHouseRestart(int argc, char ** argv)

bool force = options["force"].as<bool>();
bool do_not_kill = options["do-not-kill"].as<bool>();
if (int res = stop(pid_file, force, do_not_kill))
unsigned max_tries = options["max-tries"].as<unsigned>();
return res;

return start(user, executable, config, pid_file);
if (int res = stop(pid_file, force, do_not_kill, max_tries))
return res;
return start(user, executable, config, pid_file, max_tries);
}
catch (...)
{
@ -13,7 +13,6 @@ clickhouse_embed_binaries(

set(CLICKHOUSE_KEEPER_SOURCES
Keeper.cpp
TinyContext.cpp
)

set (CLICKHOUSE_KEEPER_LINK
@ -49,6 +48,8 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateMachine.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStateManager.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperStorage.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/KeeperAsynchronousMetrics.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/TinyContext.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/pathUtils.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/SessionExpiryQueue.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Coordination/SummingStateMachine.cpp
@ -64,7 +65,18 @@ if (BUILD_STANDALONE_KEEPER)

${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/KeeperTCPHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/TCPServer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/NotFoundHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/ProtocolServerAdapter.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/PrometheusRequestHandler.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/PrometheusMetricsWriter.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTPRequestHandlerFactoryMain.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/ReadHeaders.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerConnection.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerRequest.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerResponse.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/HTTPServerConnectionFactory.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Server/HTTP/WriteBufferFromHTTPServerResponse.cpp

${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CachedCompressedReadBuffer.cpp
${CMAKE_CURRENT_SOURCE_DIR}/../../src/Compression/CheckingCompressedReadBuffer.cpp
@ -96,9 +108,7 @@ if (BUILD_STANDALONE_KEEPER)
${CMAKE_CURRENT_BINARY_DIR}/../../src/Daemon/GitHash.generated.cpp

Keeper.cpp
TinyContext.cpp
clickhouse-keeper.cpp

)

clickhouse_add_executable(clickhouse-keeper ${CLICKHOUSE_KEEPER_STANDALONE_SOURCES})
@ -6,7 +6,6 @@
|
|||||||
#include <Interpreters/DNSCacheUpdater.h>
|
#include <Interpreters/DNSCacheUpdater.h>
|
||||||
#include <Coordination/Defines.h>
|
#include <Coordination/Defines.h>
|
||||||
#include <Common/Config/ConfigReloader.h>
|
#include <Common/Config/ConfigReloader.h>
|
||||||
#include <Server/TCPServer.h>
|
|
||||||
#include <filesystem>
|
#include <filesystem>
|
||||||
#include <IO/UseSSL.h>
|
#include <IO/UseSSL.h>
|
||||||
#include <Core/ServerUUID.h>
|
#include <Core/ServerUUID.h>
|
||||||
@ -22,8 +21,15 @@
|
|||||||
#include <Poco/Environment.h>
|
#include <Poco/Environment.h>
|
||||||
#include <sys/stat.h>
|
#include <sys/stat.h>
|
||||||
#include <pwd.h>
|
#include <pwd.h>
|
||||||
#include <Coordination/FourLetterCommand.h>
|
|
||||||
|
|
||||||
|
#include <Coordination/FourLetterCommand.h>
|
||||||
|
#include <Coordination/KeeperAsynchronousMetrics.h>
|
||||||
|
|
||||||
|
#include <Server/HTTP/HTTPServer.h>
|
||||||
|
#include <Server/TCPServer.h>
|
||||||
|
#include <Server/HTTPHandlerFactory.h>
|
||||||
|
|
||||||
|
#include "Core/Defines.h"
|
||||||
#include "config.h"
|
#include "config.h"
|
||||||
#include "config_version.h"
|
#include "config_version.h"
|
||||||
|
|
||||||
@ -52,6 +58,16 @@ int mainEntryClickHouseKeeper(int argc, char ** argv)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef KEEPER_STANDALONE_BUILD
|
||||||
|
|
||||||
|
// Weak symbols don't work correctly on Darwin
|
||||||
|
// so we have a stub implementation to avoid linker errors
|
||||||
|
void collectCrashLog(
|
||||||
|
Int32, UInt64, const String &, const StackTrace &)
|
||||||
|
{}
|
||||||
|
|
||||||
|
#endif
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
@ -261,6 +277,60 @@ void Keeper::defineOptions(Poco::Util::OptionSet & options)
|
|||||||
BaseDaemon::defineOptions(options);
|
BaseDaemon::defineOptions(options);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
struct Keeper::KeeperHTTPContext : public IHTTPContext
|
||||||
|
{
|
||||||
|
explicit KeeperHTTPContext(TinyContextPtr context_)
|
||||||
|
: context(std::move(context_))
|
||||||
|
{}
|
||||||
|
|
||||||
|
uint64_t getMaxHstsAge() const override
|
||||||
|
{
|
||||||
|
return context->getConfigRef().getUInt64("keeper_server.hsts_max_age", 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t getMaxUriSize() const override
|
||||||
|
{
|
||||||
|
return context->getConfigRef().getUInt64("keeper_server.http_max_uri_size", 1048576);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t getMaxFields() const override
|
||||||
|
{
|
||||||
|
return context->getConfigRef().getUInt64("keeper_server.http_max_fields", 1000000);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t getMaxFieldNameSize() const override
|
||||||
|
{
|
||||||
|
return context->getConfigRef().getUInt64("keeper_server.http_max_field_name_size", 1048576);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t getMaxFieldValueSize() const override
|
||||||
|
{
|
||||||
|
return context->getConfigRef().getUInt64("keeper_server.http_max_field_value_size", 1048576);
|
||||||
|
}
|
||||||
|
|
||||||
|
uint64_t getMaxChunkSize() const override
|
||||||
|
{
|
||||||
|
return context->getConfigRef().getUInt64("keeper_server.http_max_chunk_size", 100_GiB);
|
||||||
|
}
|
||||||
|
|
||||||
|
Poco::Timespan getReceiveTimeout() const override
|
||||||
|
{
|
||||||
|
return context->getConfigRef().getUInt64("keeper_server.http_receive_timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
|
||||||
|
}
|
||||||
|
|
||||||
|
Poco::Timespan getSendTimeout() const override
|
||||||
|
{
|
||||||
|
return context->getConfigRef().getUInt64("keeper_server.http_send_timeout", DEFAULT_HTTP_READ_BUFFER_TIMEOUT);
|
||||||
|
}
|
||||||
|
|
||||||
|
TinyContextPtr context;
|
||||||
|
};
|
||||||
|
|
||||||
|
HTTPContextPtr Keeper::httpContext()
|
||||||
|
{
|
||||||
|
return std::make_shared<KeeperHTTPContext>(tiny_context);
|
||||||
|
}
|
||||||
|
|
||||||
int Keeper::main(const std::vector<std::string> & /*args*/)
|
int Keeper::main(const std::vector<std::string> & /*args*/)
|
||||||
try
|
try
|
||||||
{
|
{
|
||||||
@ -335,6 +405,25 @@ try
|
|||||||
DNSResolver::instance().setDisableCacheFlag();
|
DNSResolver::instance().setDisableCacheFlag();
|
||||||
|
|
||||||
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
|
Poco::ThreadPool server_pool(3, config().getUInt("max_connections", 1024));
|
||||||
|
std::mutex servers_lock;
|
||||||
|
auto servers = std::make_shared<std::vector<ProtocolServerAdapter>>();
|
||||||
|
|
||||||
|
tiny_context = std::make_shared<TinyContext>();
|
||||||
|
/// This object will periodically calculate some metrics.
|
||||||
|
KeeperAsynchronousMetrics async_metrics(
|
||||||
|
tiny_context,
|
||||||
|
config().getUInt("asynchronous_metrics_update_period_s", 1),
|
||||||
|
[&]() -> std::vector<ProtocolServerMetrics>
|
||||||
|
{
|
||||||
|
std::vector<ProtocolServerMetrics> metrics;
|
||||||
|
|
||||||
|
std::lock_guard lock(servers_lock);
|
||||||
|
metrics.reserve(servers->size());
|
||||||
|
for (const auto & server : *servers)
|
||||||
|
metrics.emplace_back(ProtocolServerMetrics{server.getPortName(), server.currentThreads()});
|
||||||
|
return metrics;
|
||||||
|
}
|
||||||
|
);
|
||||||
|
|
||||||
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
|
std::vector<std::string> listen_hosts = DB::getMultipleValuesFromConfig(config(), "", "listen_host");
|
||||||
|
|
||||||
@ -346,15 +435,13 @@ try
|
|||||||
listen_try = true;
|
listen_try = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
auto servers = std::make_shared<std::vector<ProtocolServerAdapter>>();
|
|
||||||
|
|
||||||
/// Initialize keeper RAFT. Do nothing if no keeper_server in config.
|
/// Initialize keeper RAFT. Do nothing if no keeper_server in config.
|
||||||
tiny_context.initializeKeeperDispatcher(/* start_async = */ true);
|
tiny_context->initializeKeeperDispatcher(/* start_async = */ true);
|
||||||
FourLetterCommandFactory::registerCommands(*tiny_context.getKeeperDispatcher());
|
FourLetterCommandFactory::registerCommands(*tiny_context->getKeeperDispatcher());
|
||||||
|
|
||||||
auto config_getter = [this] () -> const Poco::Util::AbstractConfiguration &
|
auto config_getter = [this] () -> const Poco::Util::AbstractConfiguration &
|
||||||
{
|
{
|
||||||
return tiny_context.getConfigRef();
|
return tiny_context->getConfigRef();
|
||||||
};
|
};
|
||||||
|
|
||||||
for (const auto & listen_host : listen_hosts)
|
for (const auto & listen_host : listen_hosts)
|
||||||
@ -373,7 +460,7 @@ try
|
|||||||
"Keeper (tcp): " + address.toString(),
|
"Keeper (tcp): " + address.toString(),
|
||||||
std::make_unique<TCPServer>(
|
std::make_unique<TCPServer>(
|
||||||
new KeeperTCPHandlerFactory(
|
new KeeperTCPHandlerFactory(
|
||||||
config_getter, tiny_context.getKeeperDispatcher(),
|
config_getter, tiny_context->getKeeperDispatcher(),
|
||||||
config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC),
|
config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC),
|
||||||
config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), false), server_pool, socket));
|
config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), false), server_pool, socket));
|
||||||
});
|
});
|
||||||
@ -392,7 +479,7 @@ try
|
|||||||
"Keeper with secure protocol (tcp_secure): " + address.toString(),
|
"Keeper with secure protocol (tcp_secure): " + address.toString(),
|
||||||
std::make_unique<TCPServer>(
|
std::make_unique<TCPServer>(
|
||||||
new KeeperTCPHandlerFactory(
|
new KeeperTCPHandlerFactory(
|
||||||
config_getter, tiny_context.getKeeperDispatcher(),
|
config_getter, tiny_context->getKeeperDispatcher(),
|
||||||
config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC),
|
config().getUInt64("keeper_server.socket_receive_timeout_sec", DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC),
|
||||||
config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), true), server_pool, socket));
|
config().getUInt64("keeper_server.socket_send_timeout_sec", DBMS_DEFAULT_SEND_TIMEOUT_SEC), true), server_pool, socket));
|
||||||
#else
|
#else
|
||||||
@ -401,6 +488,29 @@ try
|
|||||||
ErrorCodes::SUPPORT_IS_DISABLED};
|
ErrorCodes::SUPPORT_IS_DISABLED};
|
||||||
#endif
|
#endif
|
||||||
});
|
});
|
||||||
|
|
||||||
|
const auto & config = config_getter();
|
||||||
|
Poco::Timespan keep_alive_timeout(config.getUInt("keep_alive_timeout", 10), 0);
|
||||||
|
Poco::Net::HTTPServerParams::Ptr http_params = new Poco::Net::HTTPServerParams;
|
||||||
|
http_params->setTimeout(DBMS_DEFAULT_RECEIVE_TIMEOUT_SEC);
|
||||||
|
http_params->setKeepAliveTimeout(keep_alive_timeout);
|
||||||
|
|
||||||
|
/// Prometheus (if defined and not setup yet with http_port)
|
||||||
|
port_name = "prometheus.port";
|
||||||
|
createServer(listen_host, port_name, listen_try, [&](UInt16 port)
|
||||||
|
{
|
||||||
|
Poco::Net::ServerSocket socket;
|
||||||
|
auto address = socketBindListen(socket, listen_host, port);
|
||||||
|
auto http_context = httpContext();
|
||||||
|
socket.setReceiveTimeout(http_context->getReceiveTimeout());
|
||||||
|
socket.setSendTimeout(http_context->getSendTimeout());
|
||||||
|
servers->emplace_back(
|
||||||
|
listen_host,
|
||||||
|
port_name,
|
||||||
|
"Prometheus: http://" + address.toString(),
|
||||||
|
std::make_unique<HTTPServer>(
|
||||||
|
std::move(http_context), createPrometheusMainHandlerFactory(*this, config_getter(), async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
for (auto & server : *servers)
|
for (auto & server : *servers)
|
||||||
@ -409,6 +519,8 @@ try
|
|||||||
LOG_INFO(log, "Listening for {}", server.getDescription());
|
LOG_INFO(log, "Listening for {}", server.getDescription());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async_metrics.start();
|
||||||
|
|
||||||
zkutil::EventPtr unused_event = std::make_shared<Poco::Event>();
|
zkutil::EventPtr unused_event = std::make_shared<Poco::Event>();
|
||||||
zkutil::ZooKeeperNodeCache unused_cache([] { return nullptr; });
|
zkutil::ZooKeeperNodeCache unused_cache([] { return nullptr; });
|
||||||
/// ConfigReloader have to strict parameters which are redundant in our case
|
/// ConfigReloader have to strict parameters which are redundant in our case
|
||||||
@ -421,7 +533,7 @@ try
|
|||||||
[&](ConfigurationPtr config, bool /* initial_loading */)
|
[&](ConfigurationPtr config, bool /* initial_loading */)
|
||||||
{
|
{
|
||||||
if (config->has("keeper_server"))
|
if (config->has("keeper_server"))
|
||||||
tiny_context.updateKeeperConfiguration(*config);
|
tiny_context->updateKeeperConfiguration(*config);
|
||||||
},
|
},
|
||||||
/* already_loaded = */ false); /// Reload it right now (initial loading)
|
/* already_loaded = */ false); /// Reload it right now (initial loading)
|
||||||
|
|
||||||
@ -429,6 +541,8 @@ try
|
|||||||
LOG_INFO(log, "Shutting down.");
|
LOG_INFO(log, "Shutting down.");
|
||||||
main_config_reloader.reset();
|
main_config_reloader.reset();
|
||||||
|
|
||||||
|
async_metrics.stop();
|
||||||
|
|
||||||
LOG_DEBUG(log, "Waiting for current connections to Keeper to finish.");
|
LOG_DEBUG(log, "Waiting for current connections to Keeper to finish.");
|
||||||
size_t current_connections = 0;
|
size_t current_connections = 0;
|
||||||
for (auto & server : *servers)
|
for (auto & server : *servers)
|
||||||
@ -450,7 +564,7 @@ try
|
|||||||
else
|
else
|
||||||
LOG_INFO(log, "Closed connections to Keeper.");
|
LOG_INFO(log, "Closed connections to Keeper.");
|
||||||
|
|
||||||
tiny_context.shutdownKeeperDispatcher();
|
tiny_context->shutdownKeeperDispatcher();
|
||||||
|
|
||||||
/// Wait server pool to avoid use-after-free of destroyed context in the handlers
|
/// Wait server pool to avoid use-after-free of destroyed context in the handlers
|
||||||
server_pool.joinAll();
|
server_pool.joinAll();
|
||||||
|
@ -1,8 +1,9 @@
|
|||||||
#pragma once
|
#pragma once
|
||||||
|
|
||||||
#include <Server/IServer.h>
|
#include <Server/IServer.h>
|
||||||
|
#include <Server/HTTP/HTTPContext.h>
|
||||||
#include <Daemon/BaseDaemon.h>
|
#include <Daemon/BaseDaemon.h>
|
||||||
#include "TinyContext.h"
|
#include <Coordination/TinyContext.h>
|
||||||
|
|
||||||
namespace Poco
|
namespace Poco
|
||||||
{
|
{
|
||||||
@ -15,29 +16,40 @@ namespace Poco
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int LOGICAL_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
/// standalone clickhouse-keeper server (replacement for ZooKeeper). Uses the same
|
/// standalone clickhouse-keeper server (replacement for ZooKeeper). Uses the same
|
||||||
/// config as clickhouse-server. Serves requests on TCP ports with or without
|
/// config as clickhouse-server. Serves requests on TCP ports with or without
|
||||||
/// SSL using ZooKeeper protocol.
|
/// SSL using ZooKeeper protocol.
|
||||||
class Keeper : public BaseDaemon
|
class Keeper : public BaseDaemon, public IServer
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
using ServerApplication::run;
|
using ServerApplication::run;
|
||||||
|
|
||||||
Poco::Util::LayeredConfiguration & config() const
|
Poco::Util::LayeredConfiguration & config() const override
|
||||||
{
|
{
|
||||||
return BaseDaemon::config();
|
return BaseDaemon::config();
|
||||||
}
|
}
|
||||||
|
|
||||||
Poco::Logger & logger() const
|
Poco::Logger & logger() const override
|
||||||
{
|
{
|
||||||
return BaseDaemon::logger();
|
return BaseDaemon::logger();
|
||||||
}
|
}
|
||||||
|
|
||||||
bool isCancelled() const
|
bool isCancelled() const override
|
||||||
{
|
{
|
||||||
return BaseDaemon::isCancelled();
|
return BaseDaemon::isCancelled();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Returns global application's context.
|
||||||
|
ContextMutablePtr context() const override
|
||||||
|
{
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot fetch context for Keeper");
|
||||||
|
}
|
||||||
|
|
||||||
void defineOptions(Poco::Util::OptionSet & _options) override;
|
void defineOptions(Poco::Util::OptionSet & _options) override;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
@ -56,7 +68,10 @@ protected:
|
|||||||
std::string getDefaultConfigFileName() const override;
|
std::string getDefaultConfigFileName() const override;
|
||||||
|
|
||||||
private:
|
private:
|
||||||
TinyContext tiny_context;
|
TinyContextPtr tiny_context;
|
||||||
|
|
||||||
|
struct KeeperHTTPContext;
|
||||||
|
HTTPContextPtr httpContext();
|
||||||
|
|
||||||
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const;
|
Poco::Net::SocketAddress socketBindListen(Poco::Net::ServerSocket & socket, const std::string & host, UInt16 port, [[maybe_unused]] bool secure = false) const;
|
||||||
|
|
||||||
|
@ -1,6 +1,6 @@
|
|||||||
#include "MetricsTransmitter.h"
|
#include "MetricsTransmitter.h"
|
||||||
|
|
||||||
#include <Interpreters/AsynchronousMetrics.h>
|
#include <Common/AsynchronousMetrics.h>
|
||||||
|
|
||||||
#include <Common/CurrentMetrics.h>
|
#include <Common/CurrentMetrics.h>
|
||||||
#include <Common/Exception.h>
|
#include <Common/Exception.h>
|
||||||
|
@ -46,7 +46,7 @@
|
|||||||
#include <IO/ReadBufferFromFile.h>
|
#include <IO/ReadBufferFromFile.h>
|
||||||
#include <IO/IOThreadPool.h>
|
#include <IO/IOThreadPool.h>
|
||||||
#include <IO/UseSSL.h>
|
#include <IO/UseSSL.h>
|
||||||
#include <Interpreters/AsynchronousMetrics.h>
|
#include <Interpreters/ServerAsynchronousMetrics.h>
|
||||||
#include <Interpreters/DDLWorker.h>
|
#include <Interpreters/DDLWorker.h>
|
||||||
#include <Interpreters/DNSCacheUpdater.h>
|
#include <Interpreters/DNSCacheUpdater.h>
|
||||||
#include <Interpreters/DatabaseCatalog.h>
|
#include <Interpreters/DatabaseCatalog.h>
|
||||||
@ -212,6 +212,7 @@ try
|
|||||||
|
|
||||||
/// Clearing old temporary files.
|
/// Clearing old temporary files.
|
||||||
fs::directory_iterator dir_end;
|
fs::directory_iterator dir_end;
|
||||||
|
size_t unknown_files = 0;
|
||||||
for (fs::directory_iterator it(path); it != dir_end; ++it)
|
for (fs::directory_iterator it(path); it != dir_end; ++it)
|
||||||
{
|
{
|
||||||
if (it->is_regular_file() && startsWith(it->path().filename(), "tmp"))
|
if (it->is_regular_file() && startsWith(it->path().filename(), "tmp"))
|
||||||
@ -220,8 +221,17 @@ try
|
|||||||
fs::remove(it->path());
|
fs::remove(it->path());
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
LOG_DEBUG(log, "Found unknown file in temporary path {}", it->path().string());
|
{
|
||||||
|
unknown_files++;
|
||||||
|
if (unknown_files < 100)
|
||||||
|
LOG_DEBUG(log, "Found unknown {} {} in temporary path",
|
||||||
|
it->is_regular_file() ? "file" : (it->is_directory() ? "directory" : "element"),
|
||||||
|
it->path().string());
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (unknown_files)
|
||||||
|
LOG_DEBUG(log, "Found {} unknown files in temporary path", unknown_files);
|
||||||
}
|
}
|
||||||
catch (...)
|
catch (...)
|
||||||
{
|
{
|
||||||
@ -793,7 +803,7 @@ try
|
|||||||
std::vector<ProtocolServerAdapter> servers;
|
std::vector<ProtocolServerAdapter> servers;
|
||||||
std::vector<ProtocolServerAdapter> servers_to_start_before_tables;
|
std::vector<ProtocolServerAdapter> servers_to_start_before_tables;
|
||||||
/// This object will periodically calculate some metrics.
|
/// This object will periodically calculate some metrics.
|
||||||
AsynchronousMetrics async_metrics(
|
ServerAsynchronousMetrics async_metrics(
|
||||||
global_context,
|
global_context,
|
||||||
config().getUInt("asynchronous_metrics_update_period_s", 1),
|
config().getUInt("asynchronous_metrics_update_period_s", 1),
|
||||||
config().getUInt("asynchronous_heavy_metrics_update_period_s", 120),
|
config().getUInt("asynchronous_heavy_metrics_update_period_s", 120),
|
||||||
@ -1478,8 +1488,7 @@ try
|
|||||||
if (settings.async_insert_threads)
|
if (settings.async_insert_threads)
|
||||||
global_context->setAsynchronousInsertQueue(std::make_shared<AsynchronousInsertQueue>(
|
global_context->setAsynchronousInsertQueue(std::make_shared<AsynchronousInsertQueue>(
|
||||||
global_context,
|
global_context,
|
||||||
settings.async_insert_threads,
|
settings.async_insert_threads));
|
||||||
settings.async_insert_cleanup_timeout_ms));
|
|
||||||
|
|
||||||
/// Size of cache for marks (index of MergeTree family of tables).
|
/// Size of cache for marks (index of MergeTree family of tables).
|
||||||
size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120);
|
size_t mark_cache_size = config().getUInt64("mark_cache_size", 5368709120);
|
||||||
@ -1941,15 +1950,15 @@ std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
|
|||||||
return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this));
|
return TCPServerConnectionFactory::Ptr(new PostgreSQLHandlerFactory(*this));
|
||||||
if (type == "http")
|
if (type == "http")
|
||||||
return TCPServerConnectionFactory::Ptr(
|
return TCPServerConnectionFactory::Ptr(
|
||||||
new HTTPServerConnectionFactory(context(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"))
|
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"))
|
||||||
);
|
);
|
||||||
if (type == "prometheus")
|
if (type == "prometheus")
|
||||||
return TCPServerConnectionFactory::Ptr(
|
return TCPServerConnectionFactory::Ptr(
|
||||||
new HTTPServerConnectionFactory(context(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"))
|
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"))
|
||||||
);
|
);
|
||||||
if (type == "interserver")
|
if (type == "interserver")
|
||||||
return TCPServerConnectionFactory::Ptr(
|
return TCPServerConnectionFactory::Ptr(
|
||||||
new HTTPServerConnectionFactory(context(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"))
|
new HTTPServerConnectionFactory(httpContext(), http_params, createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"))
|
||||||
);
|
);
|
||||||
|
|
||||||
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol configuration error, unknown protocol name '{}'", type);
|
throw Exception(ErrorCodes::INVALID_CONFIG_PARAMETER, "Protocol configuration error, unknown protocol name '{}'", type);
|
||||||
@ -1990,6 +1999,11 @@ std::unique_ptr<TCPProtocolStackFactory> Server::buildProtocolStackFromConfig(
|
|||||||
return stack;
|
return stack;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
HTTPContextPtr Server::httpContext() const
|
||||||
|
{
|
||||||
|
return std::make_shared<HTTPContext>(context());
|
||||||
|
}
|
||||||
|
|
||||||
void Server::createServers(
|
void Server::createServers(
|
||||||
Poco::Util::AbstractConfiguration & config,
|
Poco::Util::AbstractConfiguration & config,
|
||||||
const Strings & listen_hosts,
|
const Strings & listen_hosts,
|
||||||
@ -2072,7 +2086,7 @@ void Server::createServers(
|
|||||||
port_name,
|
port_name,
|
||||||
"http://" + address.toString(),
|
"http://" + address.toString(),
|
||||||
std::make_unique<HTTPServer>(
|
std::make_unique<HTTPServer>(
|
||||||
context(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params));
|
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPHandler-factory"), server_pool, socket, http_params));
|
||||||
});
|
});
|
||||||
|
|
||||||
/// HTTPS
|
/// HTTPS
|
||||||
@ -2089,7 +2103,7 @@ void Server::createServers(
|
|||||||
port_name,
|
port_name,
|
||||||
"https://" + address.toString(),
|
"https://" + address.toString(),
|
||||||
std::make_unique<HTTPServer>(
|
std::make_unique<HTTPServer>(
|
||||||
context(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
|
httpContext(), createHandlerFactory(*this, config, async_metrics, "HTTPSHandler-factory"), server_pool, socket, http_params));
|
||||||
#else
|
#else
|
||||||
UNUSED(port);
|
UNUSED(port);
|
||||||
throw Exception{"HTTPS protocol is disabled because Poco library was built without NetSSL support.",
|
throw Exception{"HTTPS protocol is disabled because Poco library was built without NetSSL support.",
|
||||||
@ -2214,7 +2228,7 @@ void Server::createServers(
|
|||||||
port_name,
|
port_name,
|
||||||
"Prometheus: http://" + address.toString(),
|
"Prometheus: http://" + address.toString(),
|
||||||
std::make_unique<HTTPServer>(
|
std::make_unique<HTTPServer>(
|
||||||
context(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
|
httpContext(), createHandlerFactory(*this, config, async_metrics, "PrometheusHandler-factory"), server_pool, socket, http_params));
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -2234,7 +2248,7 @@ void Server::createServers(
|
|||||||
port_name,
|
port_name,
|
||||||
"replica communication (interserver): http://" + address.toString(),
|
"replica communication (interserver): http://" + address.toString(),
|
||||||
std::make_unique<HTTPServer>(
|
std::make_unique<HTTPServer>(
|
||||||
context(),
|
httpContext(),
|
||||||
createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"),
|
createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPHandler-factory"),
|
||||||
server_pool,
|
server_pool,
|
||||||
socket,
|
socket,
|
||||||
@ -2254,7 +2268,7 @@ void Server::createServers(
|
|||||||
port_name,
|
port_name,
|
||||||
"secure replica communication (interserver): https://" + address.toString(),
|
"secure replica communication (interserver): https://" + address.toString(),
|
||||||
std::make_unique<HTTPServer>(
|
std::make_unique<HTTPServer>(
|
||||||
context(),
|
httpContext(),
|
||||||
createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPSHandler-factory"),
|
createHandlerFactory(*this, config, async_metrics, "InterserverIOHTTPSHandler-factory"),
|
||||||
server_pool,
|
server_pool,
|
||||||
socket,
|
socket,
|
||||||
|
@ -3,6 +3,7 @@
|
|||||||
#include <Server/IServer.h>
|
#include <Server/IServer.h>
|
||||||
|
|
||||||
#include <Daemon/BaseDaemon.h>
|
#include <Daemon/BaseDaemon.h>
|
||||||
|
#include "Server/HTTP/HTTPContext.h"
|
||||||
#include <Server/TCPProtocolStackFactory.h>
|
#include <Server/TCPProtocolStackFactory.h>
|
||||||
#include <Poco/Net/HTTPServerParams.h>
|
#include <Poco/Net/HTTPServerParams.h>
|
||||||
|
|
||||||
@ -72,6 +73,8 @@ private:
|
|||||||
/// Updated/recent config, to compare http_handlers
|
/// Updated/recent config, to compare http_handlers
|
||||||
ConfigurationPtr latest_config;
|
ConfigurationPtr latest_config;
|
||||||
|
|
||||||
|
HTTPContextPtr httpContext() const;
|
||||||
|
|
||||||
Poco::Net::SocketAddress socketBindListen(
|
Poco::Net::SocketAddress socketBindListen(
|
||||||
const Poco::Util::AbstractConfiguration & config,
|
const Poco::Util::AbstractConfiguration & config,
|
||||||
Poco::Net::ServerSocket & socket,
|
Poco::Net::ServerSocket & socket,
|
||||||
|
@ -139,8 +139,10 @@ void SettingsProfilesCache::mergeSettingsAndConstraintsFor(EnabledSettings & ena
|
|||||||
merged_settings.merge(enabled.params.settings_from_user);
|
merged_settings.merge(enabled.params.settings_from_user);
|
||||||
|
|
||||||
auto info = std::make_shared<SettingsProfilesInfo>(access_control);
|
auto info = std::make_shared<SettingsProfilesInfo>(access_control);
|
||||||
info->profiles = enabled.params.settings_from_user.toProfileIDs();
|
|
||||||
|
info->profiles = merged_settings.toProfileIDs();
|
||||||
substituteProfiles(merged_settings, info->profiles_with_implicit, info->names_of_profiles);
|
substituteProfiles(merged_settings, info->profiles_with_implicit, info->names_of_profiles);
|
||||||
|
|
||||||
info->settings = merged_settings.toSettingsChanges();
|
info->settings = merged_settings.toSettingsChanges();
|
||||||
info->constraints = merged_settings.toSettingsConstraints(access_control);
|
info->constraints = merged_settings.toSettingsConstraints(access_control);
|
||||||
|
|
||||||
|
@ -1,11 +1,18 @@
|
|||||||
#include <Access/SettingsProfilesInfo.h>
|
#include <Access/SettingsProfilesInfo.h>
|
||||||
|
#include <Access/AccessControl.h>
|
||||||
#include <Access/SettingsConstraintsAndProfileIDs.h>
|
#include <Access/SettingsConstraintsAndProfileIDs.h>
|
||||||
#include <base/removeDuplicates.h>
|
#include <base/removeDuplicates.h>
|
||||||
|
#include <Common/Exception.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
|
namespace ErrorCodes
|
||||||
|
{
|
||||||
|
extern const int LOGICAL_ERROR;
|
||||||
|
}
|
||||||
|
|
||||||
bool operator==(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs)
|
bool operator==(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs)
|
||||||
{
|
{
|
||||||
if (lhs.settings != rhs.settings)
|
if (lhs.settings != rhs.settings)
|
||||||
@ -55,4 +62,26 @@ SettingsProfilesInfo::getConstraintsAndProfileIDs(const std::shared_ptr<const Se
|
|||||||
return res;
|
return res;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Strings SettingsProfilesInfo::getProfileNames() const
|
||||||
|
{
|
||||||
|
Strings result;
|
||||||
|
result.reserve(profiles.size());
|
||||||
|
for (const auto & profile_id : profiles)
|
||||||
|
{
|
||||||
|
const auto p = names_of_profiles.find(profile_id);
|
||||||
|
if (p != names_of_profiles.end())
|
||||||
|
result.push_back(p->second);
|
||||||
|
else
|
||||||
|
{
|
||||||
|
if (const auto name = access_control.tryReadName(profile_id))
|
||||||
|
// We could've updated cache here, but it is a very rare case, so don't bother.
|
||||||
|
result.push_back(*name);
|
||||||
|
else
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to get profile name for {}", toString(profile_id));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -36,15 +36,7 @@ struct SettingsProfilesInfo
|
|||||||
friend bool operator ==(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs);
|
friend bool operator ==(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs);
|
||||||
friend bool operator !=(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs) { return !(lhs == rhs); }
|
friend bool operator !=(const SettingsProfilesInfo & lhs, const SettingsProfilesInfo & rhs) { return !(lhs == rhs); }
|
||||||
|
|
||||||
Strings getProfileNames() const
|
Strings getProfileNames() const;
|
||||||
{
|
|
||||||
Strings result;
|
|
||||||
result.reserve(profiles.size());
|
|
||||||
for (const auto & profile_id : profiles)
|
|
||||||
result.push_back(names_of_profiles.at(profile_id));
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
private:
|
private:
|
||||||
const AccessControl & access_control;
|
const AccessControl & access_control;
|
||||||
|
@ -160,7 +160,7 @@ public:
|
|||||||
else
|
else
|
||||||
{
|
{
|
||||||
writeBinary(UInt8(0), buf);
|
writeBinary(UInt8(0), buf);
|
||||||
serialization->serializeBinary(elem, buf);
|
serialization->serializeBinary(elem, buf, {});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -181,7 +181,7 @@ public:
|
|||||||
UInt8 is_null = 0;
|
UInt8 is_null = 0;
|
||||||
readBinary(is_null, buf);
|
readBinary(is_null, buf);
|
||||||
if (!is_null)
|
if (!is_null)
|
||||||
serialization->deserializeBinary(arr[i], buf);
|
serialization->deserializeBinary(arr[i], buf, {});
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -30,6 +30,7 @@ namespace ErrorCodes
|
|||||||
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
extern const int ILLEGAL_TYPE_OF_ARGUMENT;
|
||||||
extern const int NOT_IMPLEMENTED;
|
extern const int NOT_IMPLEMENTED;
|
||||||
extern const int TOO_LARGE_STRING_SIZE;
|
extern const int TOO_LARGE_STRING_SIZE;
|
||||||
|
extern const int LOGICAL_ERROR;
|
||||||
}
|
}
|
||||||
|
|
||||||
/** Aggregate functions that store one of passed values.
|
/** Aggregate functions that store one of passed values.
|
||||||
@ -485,13 +486,15 @@ struct SingleValueDataString //-V730
|
|||||||
private:
|
private:
|
||||||
using Self = SingleValueDataString;
|
using Self = SingleValueDataString;
|
||||||
|
|
||||||
Int32 size = -1; /// -1 indicates that there is no value.
|
/// 0 size indicates that there is no value. Empty string must has terminating '\0' and, therefore, size of empty string is 1
|
||||||
Int32 capacity = 0; /// power of two or zero
|
UInt32 size = 0;
|
||||||
|
UInt32 capacity = 0; /// power of two or zero
|
||||||
char * large_data;
|
char * large_data;
|
||||||
|
|
||||||
public:
|
public:
|
||||||
static constexpr Int32 AUTOMATIC_STORAGE_SIZE = 64;
|
static constexpr UInt32 AUTOMATIC_STORAGE_SIZE = 64;
|
||||||
static constexpr Int32 MAX_SMALL_STRING_SIZE = AUTOMATIC_STORAGE_SIZE - sizeof(size) - sizeof(capacity) - sizeof(large_data);
|
static constexpr UInt32 MAX_SMALL_STRING_SIZE = AUTOMATIC_STORAGE_SIZE - sizeof(size) - sizeof(capacity) - sizeof(large_data);
|
||||||
|
static constexpr UInt32 MAX_STRING_SIZE = std::numeric_limits<Int32>::max();
|
||||||
|
|
||||||
private:
|
private:
|
||||||
char small_data[MAX_SMALL_STRING_SIZE]; /// Including the terminating zero.
|
char small_data[MAX_SMALL_STRING_SIZE]; /// Including the terminating zero.
|
||||||
@ -502,7 +505,7 @@ public:
|
|||||||
|
|
||||||
bool has() const
|
bool has() const
|
||||||
{
|
{
|
||||||
return size >= 0;
|
return size;
|
||||||
}
|
}
|
||||||
|
|
||||||
private:
|
private:
|
||||||
@ -536,20 +539,27 @@ public:
|
|||||||
|
|
||||||
void write(WriteBuffer & buf, const ISerialization & /*serialization*/) const
|
void write(WriteBuffer & buf, const ISerialization & /*serialization*/) const
|
||||||
{
|
{
|
||||||
writeBinary(size, buf);
|
if (unlikely(MAX_STRING_SIZE < size))
|
||||||
|
throw Exception(ErrorCodes::LOGICAL_ERROR, "String size is too big ({}), it's a bug", size);
|
||||||
|
|
||||||
|
/// For serialization we use signed Int32 (for historical reasons), -1 means "no value"
|
||||||
|
Int32 size_to_write = size ? size : -1;
|
||||||
|
writeBinary(size_to_write, buf);
|
||||||
if (has())
|
if (has())
|
||||||
buf.write(getData(), size);
|
buf.write(getData(), size);
|
||||||
}
|
}
|
||||||
|
|
||||||
void allocateLargeDataIfNeeded(Int64 size_to_reserve, Arena * arena)
|
void allocateLargeDataIfNeeded(UInt32 size_to_reserve, Arena * arena)
|
||||||
{
|
{
|
||||||
if (capacity < size_to_reserve)
|
if (capacity < size_to_reserve)
|
||||||
{
|
{
|
||||||
capacity = static_cast<Int32>(roundUpToPowerOfTwoOrZero(size_to_reserve));
|
if (unlikely(MAX_STRING_SIZE < size_to_reserve))
|
||||||
/// It might happen if the size was too big and the rounded value does not fit a size_t
|
|
||||||
if (unlikely(capacity < size_to_reserve))
|
|
||||||
throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", size_to_reserve);
|
throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", size_to_reserve);
|
||||||
|
|
||||||
|
size_t rounded_capacity = roundUpToPowerOfTwoOrZero(size_to_reserve);
|
||||||
|
chassert(rounded_capacity <= MAX_STRING_SIZE + 1); /// rounded_capacity <= 2^31
|
||||||
|
capacity = static_cast<UInt32>(rounded_capacity);
|
||||||
|
|
||||||
/// Don't free large_data here.
|
/// Don't free large_data here.
|
||||||
large_data = arena->alloc(capacity);
|
large_data = arena->alloc(capacity);
|
||||||
}
|
}
|
||||||
@ -557,31 +567,28 @@ public:
|
|||||||
|
|
||||||
void read(ReadBuffer & buf, const ISerialization & /*serialization*/, Arena * arena)
|
void read(ReadBuffer & buf, const ISerialization & /*serialization*/, Arena * arena)
|
||||||
{
|
{
|
||||||
Int32 rhs_size;
|
/// For serialization we use signed Int32 (for historical reasons), -1 means "no value"
|
||||||
readBinary(rhs_size, buf);
|
Int32 rhs_size_signed;
|
||||||
|
readBinary(rhs_size_signed, buf);
|
||||||
|
|
||||||
if (rhs_size < 0)
|
if (rhs_size_signed < 0)
|
||||||
{
|
{
|
||||||
/// Don't free large_data here.
|
/// Don't free large_data here.
|
||||||
size = rhs_size;
|
size = 0;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
UInt32 rhs_size = rhs_size_signed;
|
||||||
if (rhs_size <= MAX_SMALL_STRING_SIZE)
|
if (rhs_size <= MAX_SMALL_STRING_SIZE)
|
||||||
{
|
{
|
||||||
/// Don't free large_data here.
|
/// Don't free large_data here.
|
||||||
|
|
||||||
size = rhs_size;
|
size = rhs_size;
|
||||||
|
buf.readStrict(small_data, size);
|
||||||
if (size > 0)
|
|
||||||
buf.readStrict(small_data, size);
|
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
/// Reserve one byte more for null-character
|
/// Reserve one byte more for null-character
|
||||||
Int64 rhs_size_to_reserve = rhs_size;
|
allocateLargeDataIfNeeded(rhs_size + 1, arena);
|
||||||
rhs_size_to_reserve += 1; /// Avoid overflow
|
|
||||||
allocateLargeDataIfNeeded(rhs_size_to_reserve, arena);
|
|
||||||
size = rhs_size;
|
size = rhs_size;
|
||||||
buf.readStrict(large_data, size);
|
buf.readStrict(large_data, size);
|
||||||
}
|
}
|
||||||
@ -616,7 +623,10 @@ public:
|
|||||||
/// Assuming to.has()
|
/// Assuming to.has()
|
||||||
void changeImpl(StringRef value, Arena * arena)
|
void changeImpl(StringRef value, Arena * arena)
|
||||||
{
|
{
|
||||||
Int32 value_size = static_cast<Int32>(value.size);
|
if (unlikely(MAX_STRING_SIZE < value.size))
|
||||||
|
throw Exception(ErrorCodes::TOO_LARGE_STRING_SIZE, "String size is too big ({})", value.size);
|
||||||
|
|
||||||
|
UInt32 value_size = static_cast<UInt32>(value.size);
|
||||||
|
|
||||||
if (value_size <= MAX_SMALL_STRING_SIZE)
|
if (value_size <= MAX_SMALL_STRING_SIZE)
|
||||||
{
|
{
|
||||||
@ -785,7 +795,7 @@ public:
|
|||||||
if (!value.isNull())
|
if (!value.isNull())
|
||||||
{
|
{
|
||||||
writeBinary(true, buf);
|
writeBinary(true, buf);
|
||||||
serialization.serializeBinary(value, buf);
|
serialization.serializeBinary(value, buf, {});
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
writeBinary(false, buf);
|
writeBinary(false, buf);
|
||||||
@ -797,7 +807,7 @@ public:
|
|||||||
readBinary(is_not_null, buf);
|
readBinary(is_not_null, buf);
|
||||||
|
|
||||||
if (is_not_null)
|
if (is_not_null)
|
||||||
serialization.deserializeBinary(value, buf);
|
serialization.deserializeBinary(value, buf, {});
|
||||||
}
|
}
|
||||||
|
|
||||||
void change(const IColumn & column, size_t row_num, Arena *)
|
void change(const IColumn & column, size_t row_num, Arena *)
|
||||||
@ -1065,34 +1075,33 @@ struct AggregateFunctionSingleValueOrNullData : Data
|
|||||||
bool first_value = true;
|
bool first_value = true;
|
||||||
bool is_null = false;
|
bool is_null = false;
|
||||||
|
|
||||||
bool changeIfBetter(const IColumn & column, size_t row_num, Arena * arena)
|
void changeIfBetter(const IColumn & column, size_t row_num, Arena * arena)
|
||||||
{
|
{
|
||||||
if (first_value)
|
if (first_value)
|
||||||
{
|
{
|
||||||
first_value = false;
|
first_value = false;
|
||||||
this->change(column, row_num, arena);
|
this->change(column, row_num, arena);
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
else if (!this->isEqualTo(column, row_num))
|
else if (!this->isEqualTo(column, row_num))
|
||||||
{
|
{
|
||||||
is_null = true;
|
is_null = true;
|
||||||
}
|
}
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
bool changeIfBetter(const Self & to, Arena * arena)
|
void changeIfBetter(const Self & to, Arena * arena)
|
||||||
{
|
{
|
||||||
|
if (!to.has())
|
||||||
|
return;
|
||||||
|
|
||||||
if (first_value)
|
if (first_value)
|
||||||
{
|
{
|
||||||
first_value = false;
|
first_value = false;
|
||||||
this->change(to, arena);
|
this->change(to, arena);
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
else if (!this->isEqualTo(to))
|
else if (!this->isEqualTo(to))
|
||||||
{
|
{
|
||||||
is_null = true;
|
is_null = true;
|
||||||
}
|
}
|
||||||
return false;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void addManyDefaults(const IColumn & column, size_t /*length*/, Arena * arena) { this->changeIfBetter(column, 0, arena); }
|
void addManyDefaults(const IColumn & column, size_t /*length*/, Arena * arena) { this->changeIfBetter(column, 0, arena); }
|
||||||
|
@ -155,7 +155,7 @@ public:
|
|||||||
"Values for {} are expected to be Numeric, Float or Decimal, passed type {}",
|
"Values for {} are expected to be Numeric, Float or Decimal, passed type {}",
|
||||||
getName(), value_type->getName()};
|
getName(), value_type->getName()};
|
||||||
|
|
||||||
WhichDataType value_type_to_check(value_type);
|
WhichDataType value_type_to_check(value_type_without_nullable);
|
||||||
|
|
||||||
/// Do not promote decimal because of implementation issues of this function design
|
/// Do not promote decimal because of implementation issues of this function design
|
||||||
/// Currently we cannot get result column type in case of decimal we cannot get decimal scale
|
/// Currently we cannot get result column type in case of decimal we cannot get decimal scale
|
||||||
@ -296,19 +296,19 @@ public:
|
|||||||
{
|
{
|
||||||
case 0:
|
case 0:
|
||||||
{
|
{
|
||||||
serialize = [&](size_t col_idx, const Array & values){ values_serializations[col_idx]->serializeBinary(values[col_idx], buf); };
|
serialize = [&](size_t col_idx, const Array & values){ values_serializations[col_idx]->serializeBinary(values[col_idx], buf, {}); };
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case 1:
|
case 1:
|
||||||
{
|
{
|
||||||
serialize = [&](size_t col_idx, const Array & values){ promoted_values_serializations[col_idx]->serializeBinary(values[col_idx], buf); };
|
serialize = [&](size_t col_idx, const Array & values){ promoted_values_serializations[col_idx]->serializeBinary(values[col_idx], buf, {}); };
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
for (const auto & elem : merged_maps)
|
for (const auto & elem : merged_maps)
|
||||||
{
|
{
|
||||||
keys_serialization->serializeBinary(elem.first, buf);
|
keys_serialization->serializeBinary(elem.first, buf, {});
|
||||||
for (size_t col = 0; col < values_types.size(); ++col)
|
for (size_t col = 0; col < values_types.size(); ++col)
|
||||||
serialize(col, elem.second);
|
serialize(col, elem.second);
|
||||||
}
|
}
|
||||||
@ -328,12 +328,12 @@ public:
|
|||||||
{
|
{
|
||||||
case 0:
|
case 0:
|
||||||
{
|
{
|
||||||
deserialize = [&](size_t col_idx, Array & values){ values_serializations[col_idx]->deserializeBinary(values[col_idx], buf); };
|
deserialize = [&](size_t col_idx, Array & values){ values_serializations[col_idx]->deserializeBinary(values[col_idx], buf, {}); };
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
case 1:
|
case 1:
|
||||||
{
|
{
|
||||||
deserialize = [&](size_t col_idx, Array & values){ promoted_values_serializations[col_idx]->deserializeBinary(values[col_idx], buf); };
|
deserialize = [&](size_t col_idx, Array & values){ promoted_values_serializations[col_idx]->deserializeBinary(values[col_idx], buf, {}); };
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -341,7 +341,7 @@ public:
|
|||||||
for (size_t i = 0; i < size; ++i)
|
for (size_t i = 0; i < size; ++i)
|
||||||
{
|
{
|
||||||
Field key;
|
Field key;
|
||||||
keys_serialization->deserializeBinary(key, buf);
|
keys_serialization->deserializeBinary(key, buf, {});
|
||||||
|
|
||||||
Array values;
|
Array values;
|
||||||
values.resize(values_types.size());
|
values.resize(values_types.size());
|
||||||
|
@ -4,6 +4,7 @@
|
|||||||
#include <Common/HashTable/HashSet.h>
|
#include <Common/HashTable/HashSet.h>
|
||||||
#include <Common/ThreadPool.h>
|
#include <Common/ThreadPool.h>
|
||||||
#include <Common/setThreadName.h>
|
#include <Common/setThreadName.h>
|
||||||
|
#include <Common/scope_guard_safe.h>
|
||||||
|
|
||||||
|
|
||||||
namespace DB
|
namespace DB
|
||||||
@ -51,6 +52,10 @@ public:
|
|||||||
|
|
||||||
auto thread_func = [&lhs, &rhs, next_bucket_to_merge, thread_group = CurrentThread::getGroup()]()
|
auto thread_func = [&lhs, &rhs, next_bucket_to_merge, thread_group = CurrentThread::getGroup()]()
|
||||||
{
|
{
|
||||||
|
SCOPE_EXIT_SAFE(
|
||||||
|
if (thread_group)
|
||||||
|
CurrentThread::detachQueryIfNotDetached();
|
||||||
|
);
|
||||||
if (thread_group)
|
if (thread_group)
|
||||||
CurrentThread::attachToIfDetached(thread_group);
|
CurrentThread::attachToIfDetached(thread_group);
|
||||||
setThreadName("UniqExactMerger");
|
setThreadName("UniqExactMerger");
|
||||||
|
@ -16,13 +16,18 @@
|
|||||||
namespace DB
|
namespace DB
|
||||||
{
|
{
|
||||||
|
|
||||||
ConstantNode::ConstantNode(ConstantValuePtr constant_value_)
|
ConstantNode::ConstantNode(ConstantValuePtr constant_value_, QueryTreeNodePtr source_expression)
|
||||||
: IQueryTreeNode(children_size)
|
: IQueryTreeNode(children_size)
|
||||||
, constant_value(std::move(constant_value_))
|
, constant_value(std::move(constant_value_))
|
||||||
, value_string(applyVisitor(FieldVisitorToString(), constant_value->getValue()))
|
, value_string(applyVisitor(FieldVisitorToString(), constant_value->getValue()))
|
||||||
{
|
{
|
||||||
|
children[source_child_index] = std::move(source_expression);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ConstantNode::ConstantNode(ConstantValuePtr constant_value_)
|
||||||
|
: ConstantNode(constant_value_, nullptr /*source_expression*/)
|
||||||
|
{}
|
||||||
|
|
||||||
ConstantNode::ConstantNode(Field value_, DataTypePtr value_data_type_)
|
ConstantNode::ConstantNode(Field value_, DataTypePtr value_data_type_)
|
||||||
: ConstantNode(std::make_shared<ConstantValue>(convertFieldToTypeOrThrow(value_, *value_data_type_), value_data_type_))
|
: ConstantNode(std::make_shared<ConstantValue>(convertFieldToTypeOrThrow(value_, *value_data_type_), value_data_type_))
|
||||||
{}
|
{}
|
||||||
@ -40,6 +45,12 @@ void ConstantNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state
|
|||||||
|
|
||||||
buffer << ", constant_value: " << constant_value->getValue().dump();
|
buffer << ", constant_value: " << constant_value->getValue().dump();
|
||||||
buffer << ", constant_value_type: " << constant_value->getType()->getName();
|
buffer << ", constant_value_type: " << constant_value->getType()->getName();
|
||||||
|
|
||||||
|
if (getSourceExpression())
|
||||||
|
{
|
||||||
|
buffer << '\n' << std::string(indent + 2, ' ') << "EXPRESSION " << '\n';
|
||||||
|
getSourceExpression()->dumpTreeImpl(buffer, format_state, indent + 4);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool ConstantNode::isEqualImpl(const IQueryTreeNode & rhs) const
|
bool ConstantNode::isEqualImpl(const IQueryTreeNode & rhs) const
|
||||||
|
@ -10,6 +10,8 @@ namespace DB
|
|||||||
/** Constant node represents constant value in query tree.
|
/** Constant node represents constant value in query tree.
|
||||||
* Constant value must be representable by Field.
|
* Constant value must be representable by Field.
|
||||||
* Examples: 1, 'constant_string', [1,2,3].
|
* Examples: 1, 'constant_string', [1,2,3].
|
||||||
|
*
|
||||||
|
* Constant node can optionally keep pointer to its source expression.
|
||||||
*/
|
*/
|
||||||
class ConstantNode;
|
class ConstantNode;
|
||||||
using ConstantNodePtr = std::shared_ptr<ConstantNode>;
|
using ConstantNodePtr = std::shared_ptr<ConstantNode>;
|
||||||
@ -17,6 +19,9 @@ using ConstantNodePtr = std::shared_ptr<ConstantNode>;
|
|||||||
class ConstantNode final : public IQueryTreeNode
|
class ConstantNode final : public IQueryTreeNode
|
||||||
{
|
{
|
||||||
public:
|
public:
|
||||||
|
/// Construct constant query tree node from constant value and source expression
|
||||||
|
explicit ConstantNode(ConstantValuePtr constant_value_, QueryTreeNodePtr source_expression);
|
||||||
|
|
||||||
/// Construct constant query tree node from constant value
|
/// Construct constant query tree node from constant value
|
||||||
explicit ConstantNode(ConstantValuePtr constant_value_);
|
explicit ConstantNode(ConstantValuePtr constant_value_);
|
||||||
|
|
||||||
@ -41,9 +46,22 @@ public:
|
|||||||
return value_string;
|
return value_string;
|
||||||
}
|
}
|
||||||
|
|
||||||
ConstantValuePtr getConstantValueOrNull() const override
|
/// Returns true if constant node has source expression, false otherwise
|
||||||
|
bool hasSourceExpression() const
|
||||||
{
|
{
|
||||||
return constant_value;
|
return children[source_child_index] != nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get source expression
|
||||||
|
const QueryTreeNodePtr & getSourceExpression() const
|
||||||
|
{
|
||||||
|
return children[source_child_index];
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Get source expression
|
||||||
|
QueryTreeNodePtr & getSourceExpression()
|
||||||
|
{
|
||||||
|
return children[source_child_index];
|
||||||
}
|
}
|
||||||
|
|
||||||
QueryTreeNodeType getNodeType() const override
|
QueryTreeNodeType getNodeType() const override
|
||||||
@ -71,7 +89,8 @@ private:
|
|||||||
ConstantValuePtr constant_value;
|
ConstantValuePtr constant_value;
|
||||||
String value_string;
|
String value_string;
|
||||||
|
|
||||||
static constexpr size_t children_size = 0;
|
static constexpr size_t children_size = 1;
|
||||||
|
static constexpr size_t source_child_index = 0;
|
||||||
};
|
};
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -66,12 +66,6 @@ void FunctionNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state
|
|||||||
if (result_type)
|
if (result_type)
|
||||||
buffer << ", result_type: " + result_type->getName();
|
buffer << ", result_type: " + result_type->getName();
|
||||||
|
|
||||||
if (constant_value)
|
|
||||||
{
|
|
||||||
buffer << ", constant_value: " << constant_value->getValue().dump();
|
|
||||||
buffer << ", constant_value_type: " << constant_value->getType()->getName();
|
|
||||||
}
|
|
||||||
|
|
||||||
const auto & parameters = getParameters();
|
const auto & parameters = getParameters();
|
||||||
if (!parameters.getNodes().empty())
|
if (!parameters.getNodes().empty())
|
||||||
{
|
{
|
||||||
@ -109,13 +103,6 @@ bool FunctionNode::isEqualImpl(const IQueryTreeNode & rhs) const
|
|||||||
else if (!result_type && rhs_typed.result_type)
|
else if (!result_type && rhs_typed.result_type)
|
||||||
return false;
|
return false;
|
||||||
|
|
||||||
if (constant_value && rhs_typed.constant_value && *constant_value != *rhs_typed.constant_value)
|
|
||||||
return false;
|
|
||||||
else if (constant_value && !rhs_typed.constant_value)
|
|
||||||
return false;
|
|
||||||
else if (!constant_value && rhs_typed.constant_value)
|
|
||||||
return false;
|
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -133,17 +120,6 @@ void FunctionNode::updateTreeHashImpl(HashState & hash_state) const
|
|||||||
hash_state.update(result_type_name.size());
|
hash_state.update(result_type_name.size());
|
||||||
hash_state.update(result_type_name);
|
hash_state.update(result_type_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (constant_value)
|
|
||||||
{
|
|
||||||
auto constant_dump = applyVisitor(FieldVisitorToString(), constant_value->getValue());
|
|
||||||
hash_state.update(constant_dump.size());
|
|
||||||
hash_state.update(constant_dump);
|
|
||||||
|
|
||||||
auto constant_value_type_name = constant_value->getType()->getName();
|
|
||||||
hash_state.update(constant_value_type_name.size());
|
|
||||||
hash_state.update(constant_value_type_name);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
QueryTreeNodePtr FunctionNode::cloneImpl() const
|
QueryTreeNodePtr FunctionNode::cloneImpl() const
|
||||||
@ -156,7 +132,6 @@ QueryTreeNodePtr FunctionNode::cloneImpl() const
|
|||||||
result_function->function = function;
|
result_function->function = function;
|
||||||
result_function->aggregate_function = aggregate_function;
|
result_function->aggregate_function = aggregate_function;
|
||||||
result_function->result_type = result_type;
|
result_function->result_type = result_type;
|
||||||
result_function->constant_value = constant_value;
|
|
||||||
|
|
||||||
return result_function;
|
return result_function;
|
||||||
}
|
}
|
||||||
|
@ -182,17 +182,6 @@ public:
|
|||||||
*/
|
*/
|
||||||
void resolveAsWindowFunction(AggregateFunctionPtr window_function_value, DataTypePtr result_type_value);
|
void resolveAsWindowFunction(AggregateFunctionPtr window_function_value, DataTypePtr result_type_value);
|
||||||
|
|
||||||
/// Perform constant folding for function node
|
|
||||||
void performConstantFolding(ConstantValuePtr constant_folded_value)
|
|
||||||
{
|
|
||||||
constant_value = std::move(constant_folded_value);
|
|
||||||
}
|
|
||||||
|
|
||||||
ConstantValuePtr getConstantValueOrNull() const override
|
|
||||||
{
|
|
||||||
return constant_value;
|
|
||||||
}
|
|
||||||
|
|
||||||
QueryTreeNodeType getNodeType() const override
|
QueryTreeNodeType getNodeType() const override
|
||||||
{
|
{
|
||||||
return QueryTreeNodeType::FUNCTION;
|
return QueryTreeNodeType::FUNCTION;
|
||||||
@ -219,7 +208,6 @@ private:
|
|||||||
FunctionOverloadResolverPtr function;
|
FunctionOverloadResolverPtr function;
|
||||||
AggregateFunctionPtr aggregate_function;
|
AggregateFunctionPtr aggregate_function;
|
||||||
DataTypePtr result_type;
|
DataTypePtr result_type;
|
||||||
ConstantValuePtr constant_value;
|
|
||||||
|
|
||||||
static constexpr size_t parameters_child_index = 0;
|
static constexpr size_t parameters_child_index = 0;
|
||||||
static constexpr size_t arguments_child_index = 1;
|
static constexpr size_t arguments_child_index = 1;
|
||||||
|
@ -21,7 +21,6 @@ namespace DB
|
|||||||
namespace ErrorCodes
|
namespace ErrorCodes
|
||||||
{
|
{
|
||||||
extern const int UNSUPPORTED_METHOD;
|
extern const int UNSUPPORTED_METHOD;
|
||||||
extern const int LOGICAL_ERROR;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
class WriteBuffer;
|
class WriteBuffer;
|
||||||
@ -91,30 +90,6 @@ public:
|
|||||||
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Method getResultType is not supported for {} query node", getNodeTypeName());
|
throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Method getResultType is not supported for {} query node", getNodeTypeName());
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Returns true if node has constant value
|
|
||||||
bool hasConstantValue() const
|
|
||||||
{
|
|
||||||
return getConstantValueOrNull() != nullptr;
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Returns constant value with type if node has constant value, and can be replaced with it.
|
|
||||||
* Examples: scalar subquery, function with constant arguments.
|
|
||||||
*/
|
|
||||||
virtual const ConstantValue & getConstantValue() const
|
|
||||||
{
|
|
||||||
auto constant_value = getConstantValueOrNull();
|
|
||||||
if (!constant_value)
|
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR, "Node does not have constant value");
|
|
||||||
|
|
||||||
return *constant_value;
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns constant value with type if node has constant value or null otherwise
|
|
||||||
virtual ConstantValuePtr getConstantValueOrNull() const
|
|
||||||
{
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
|
|
||||||
/** Is tree equal to other tree with node root.
|
/** Is tree equal to other tree with node root.
|
||||||
*
|
*
|
||||||
* Aliases of query tree nodes are compared during isEqual call.
|
* Aliases of query tree nodes are compared during isEqual call.
|
||||||
|
@ -89,11 +89,6 @@ public:
|
|||||||
return getExpression()->getResultType();
|
return getExpression()->getResultType();
|
||||||
}
|
}
|
||||||
|
|
||||||
ConstantValuePtr getConstantValueOrNull() const override
|
|
||||||
{
|
|
||||||
return getExpression()->getConstantValueOrNull();
|
|
||||||
}
|
|
||||||
|
|
||||||
void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override;
|
void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override;
|
||||||
|
|
||||||
protected:
|
protected:
|
||||||
|
@@ -6,6 +6,7 @@
 #include <Functions/IFunction.h>

 #include <Analyzer/InDepthQueryTreeVisitor.h>
+#include <Analyzer/ConstantNode.h>
 #include <Analyzer/FunctionNode.h>

 namespace DB
@@ -88,8 +89,8 @@ public:
 if (!supported_function_it->second.contains(inner_function_name))
 return;

-auto left_argument_constant_value = inner_function_arguments_nodes[0]->getConstantValueOrNull();
-auto right_argument_constant_value = inner_function_arguments_nodes[1]->getConstantValueOrNull();
+const auto * left_argument_constant_node = inner_function_arguments_nodes[0]->as<ConstantNode>();
+const auto * right_argument_constant_node = inner_function_arguments_nodes[1]->as<ConstantNode>();

 /** If we extract negative constant, aggregate function name must be updated.
 *
@@ -105,14 +106,14 @@ public:
 function_name_if_constant_is_negative = "min";
 }

-if (left_argument_constant_value && !right_argument_constant_value)
+if (left_argument_constant_node && !right_argument_constant_node)
 {
 /// Do not rewrite `sum(1/n)` with `sum(1) * div(1/n)` because of lose accuracy
 if (inner_function_name == "divide")
 return;

 /// Rewrite `aggregate_function(inner_function(constant, argument))` into `inner_function(constant, aggregate_function(argument))`
-const auto & left_argument_constant_value_literal = left_argument_constant_value->getValue();
+const auto & left_argument_constant_value_literal = left_argument_constant_node->getValue();
 if (!function_name_if_constant_is_negative.empty() &&
 left_argument_constant_value_literal < zeroField(left_argument_constant_value_literal))
 {
@@ -125,10 +126,10 @@ public:
 inner_function_arguments_nodes[1] = node;
 node = std::move(inner_function);
 }
-else if (right_argument_constant_value)
+else if (right_argument_constant_node)
 {
 /// Rewrite `aggregate_function(inner_function(argument, constant))` into `inner_function(aggregate_function(argument), constant)`
-const auto & right_argument_constant_value_literal = right_argument_constant_value->getValue();
+const auto & right_argument_constant_value_literal = right_argument_constant_node->getValue();
 if (!function_name_if_constant_is_negative.empty() &&
 right_argument_constant_value_literal < zeroField(right_argument_constant_value_literal))
 {
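The pass touched above relies on the algebraic fact that a constant factor can be hoisted out of an aggregate, with min and max swapping places when the constant is negative. A small self-contained check of that identity on plain vectors (this is ordinary arithmetic, not the query tree API):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main()
    {
        std::vector<double> x = {1.0, -3.0, 2.5, 7.0};
        const double c = -2.0; // a negative constant factor

        // max over c * x_i computed directly on the scaled data ...
        std::vector<double> scaled;
        for (double v : x)
            scaled.push_back(c * v);
        double max_of_scaled = *std::max_element(scaled.begin(), scaled.end());

        // ... equals c * min(x_i) when c is negative, which is why the pass
        // swaps "max" for "min" (and vice versa) before hoisting the constant.
        assert(max_of_scaled == c * *std::min_element(x.begin(), x.end()));
        return 0;
    }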
@@ -139,14 +139,14 @@ public:
 }
 else
 {
-auto second_argument_constant_value = function_arguments_nodes[1]->getConstantValueOrNull();
+const auto * second_argument_constant_node = function_arguments_nodes[1]->as<ConstantNode>();

-if (function_name == "tupleElement" && column_type.isTuple() && second_argument_constant_value)
+if (function_name == "tupleElement" && column_type.isTuple() && second_argument_constant_node)
 {
 /** Replace `tupleElement(tuple_argument, string_literal)`, `tupleElement(tuple_argument, integer_literal)`
 * with `tuple_argument.column_name`.
 */
-const auto & tuple_element_constant_value = second_argument_constant_value->getValue();
+const auto & tuple_element_constant_value = second_argument_constant_node->getValue();
 const auto & tuple_element_constant_value_type = tuple_element_constant_value.getType();

 const auto & data_type_tuple = assert_cast<const DataTypeTuple &>(*column.type);
@@ -1,19 +1,19 @@
 #include <Analyzer/Passes/FuseFunctionsPass.h>

-#include <AggregateFunctions/AggregateFunctionFactory.h>
-#include <Functions/FunctionFactory.h>
-
-#include <AggregateFunctions/IAggregateFunction.h>
-
-#include <Analyzer/InDepthQueryTreeVisitor.h>
-#include <Analyzer/FunctionNode.h>
-#include <Analyzer/ConstantNode.h>
-#include <Analyzer/HashUtils.h>
-
 #include <DataTypes/DataTypesNumber.h>
 #include <DataTypes/DataTypeArray.h>
 #include <DataTypes/DataTypeTuple.h>

+#include <Functions/FunctionFactory.h>
+
+#include <AggregateFunctions/AggregateFunctionFactory.h>
+#include <AggregateFunctions/IAggregateFunction.h>
+
+#include <Analyzer/InDepthQueryTreeVisitor.h>
+#include <Analyzer/ConstantNode.h>
+#include <Analyzer/FunctionNode.h>
+#include <Analyzer/HashUtils.h>
+
 namespace DB
 {

@@ -49,11 +49,11 @@ public:
 /// Do not apply for `count()` with without arguments or `count(*)`, only `count(x)` is supported.
 return;

-argument_to_functions_mapping[argument_nodes[0]].push_back(&node);
+argument_to_functions_mapping[argument_nodes[0]].insert(&node);
 }

 /// argument -> list of sum/count/avg functions with this argument
-QueryTreeNodePtrWithHashMap<std::vector<QueryTreeNodePtr *>> argument_to_functions_mapping;
+QueryTreeNodePtrWithHashMap<std::unordered_set<QueryTreeNodePtr *>> argument_to_functions_mapping;

 private:
 std::unordered_set<String> names_to_collect;
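Switching the mapping value above from std::vector to std::unordered_set of node pointers means that registering the same function node twice collapses to a single entry. A tiny illustration with plain pointers (toy data, not query tree nodes):

    #include <cassert>
    #include <unordered_set>
    #include <vector>

    int main()
    {
        int a = 1, b = 2;
        std::vector<int *> visited = {&a, &b, &a}; // the same node can be reached more than once

        std::unordered_set<int *> unique_nodes;
        for (int * node : visited)
            unique_nodes.insert(node); // duplicate pointer inserts are no-ops

        assert(unique_nodes.size() == 2);
        return 0;
    }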
@@ -79,6 +79,14 @@ FunctionNodePtr createResolvedAggregateFunction(const String & name, const Query
 function_node->resolveAsAggregateFunction(aggregate_function, aggregate_function->getReturnType());
 function_node->getArguments().getNodes() = { argument };

+if (!parameters.empty())
+{
+QueryTreeNodes parameter_nodes;
+for (const auto & param : parameters)
+parameter_nodes.emplace_back(std::make_shared<ConstantNode>(param));
+function_node->getParameters().getNodes() = std::move(parameter_nodes);
+}
+
 return function_node;
 }

@@ -128,7 +136,9 @@ void replaceWithSumCount(QueryTreeNodePtr & node, const FunctionNodePtr & sum_co
 }
 }

-FunctionNodePtr createFusedQuantilesNode(const std::vector<QueryTreeNodePtr *> nodes, const QueryTreeNodePtr & argument)
+/// Reorder nodes according to the value of the quantile level parameter.
+/// Levels are sorted in ascending order to make pass result deterministic.
+FunctionNodePtr createFusedQuantilesNode(std::vector<QueryTreeNodePtr *> & nodes, const QueryTreeNodePtr & argument)
 {
 Array parameters;
 parameters.reserve(nodes.size());
@@ -148,11 +158,38 @@ FunctionNodePtr createFusedQuantilesNode(const std::vector<QueryTreeNodePtr *> n
 if (parameter_nodes.size() != 1)
 throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function '{}' should have exactly one parameter", function_name);

-const auto & constant_value = parameter_nodes.front()->getConstantValueOrNull();
-if (!constant_value)
+const auto * constant_node = parameter_nodes.front()->as<ConstantNode>();
+if (!constant_node)
 throw Exception(ErrorCodes::BAD_ARGUMENTS, "Function '{}' should have constant parameter", function_name);

-parameters.push_back(constant_value->getValue());
+const auto & value = constant_node->getValue();
+if (value.getType() != Field::Types::Float64)
+throw Exception(ErrorCodes::BAD_ARGUMENTS,
+"Function '{}' should have parameter of type Float64, got '{}'",
+function_name, value.getTypeName());
+
+parameters.push_back(value);
+}
+
+{
+/// Sort nodes and parameters in ascending order of quantile level
+std::vector<size_t> permutation(nodes.size());
+std::iota(permutation.begin(), permutation.end(), 0);
+std::sort(permutation.begin(), permutation.end(), [&](size_t i, size_t j) { return parameters[i].get<Float64>() < parameters[j].get<Float64>(); });
+
+std::vector<QueryTreeNodePtr *> new_nodes;
+new_nodes.reserve(permutation.size());
+
+Array new_parameters;
+new_parameters.reserve(permutation.size());
+
+for (size_t i : permutation)
+{
+new_nodes.emplace_back(nodes[i]);
+new_parameters.emplace_back(std::move(parameters[i]));
+}
+nodes = std::move(new_nodes);
+parameters = std::move(new_parameters);
 }

 return createResolvedAggregateFunction("quantiles", argument, parameters);
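The block added above sorts two parallel sequences, the collected nodes and their quantile levels, by a single key. The iota/sort/gather pattern it uses is shown below on plain vectors; the variable names are illustrative only:

    #include <algorithm>
    #include <cstddef>
    #include <iostream>
    #include <numeric>
    #include <string>
    #include <vector>

    int main()
    {
        std::vector<double> levels = {0.9, 0.5, 0.99};
        std::vector<std::string> nodes = {"quantile(0.9)(x)", "quantile(0.5)(x)", "quantile(0.99)(x)"};

        // Build an index permutation and order it by the level value.
        std::vector<size_t> permutation(levels.size());
        std::iota(permutation.begin(), permutation.end(), 0);
        std::sort(permutation.begin(), permutation.end(),
            [&](size_t i, size_t j) { return levels[i] < levels[j]; });

        // Gather both sequences through the permutation so they stay aligned.
        std::vector<double> sorted_levels;
        std::vector<std::string> sorted_nodes;
        for (size_t i : permutation)
        {
            sorted_levels.push_back(levels[i]);
            sorted_nodes.push_back(nodes[i]);
        }

        for (size_t i = 0; i < sorted_levels.size(); ++i)
            std::cout << sorted_levels[i] << " <- " << sorted_nodes[i] << '\n';
        return 0;
    }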
@@ -183,12 +220,14 @@ void tryFuseQuantiles(QueryTreeNodePtr query_tree_node, ContextPtr context)
 FuseFunctionsVisitor visitor_quantile({"quantile"});
 visitor_quantile.visit(query_tree_node);

-for (auto & [argument, nodes] : visitor_quantile.argument_to_functions_mapping)
+for (auto & [argument, nodes_set] : visitor_quantile.argument_to_functions_mapping)
 {
-size_t nodes_size = nodes.size();
+size_t nodes_size = nodes_set.size();
 if (nodes_size < 2)
 continue;

+std::vector<QueryTreeNodePtr *> nodes(nodes_set.begin(), nodes_set.end());
+
 auto quantiles_node = createFusedQuantilesNode(nodes, argument.node);
 auto result_array_type = std::dynamic_pointer_cast<const DataTypeArray>(quantiles_node->getResultType());
 if (!result_array_type)
@@ -196,8 +235,11 @@ void tryFuseQuantiles(QueryTreeNodePtr query_tree_node, ContextPtr context)
 "Unexpected return type '{}' of function '{}', should be array",
 quantiles_node->getResultType(), quantiles_node->getFunctionName());

-for (size_t i = 0; i < nodes_size; ++i)
-*nodes[i] = createArrayElementFunction(context, result_array_type->getNestedType(), quantiles_node, i + 1);
+for (size_t i = 0; i < nodes_set.size(); ++i)
+{
+size_t array_index = i + 1;
+*nodes[i] = createArrayElementFunction(context, result_array_type->getNestedType(), quantiles_node, array_index);
+}
 }
 }

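After fusing several quantile calls into one call that returns an array, each original call site is rewritten to pick its own element of that array; the `i + 1` reflects that ClickHouse's arrayElement uses 1-based indexing. A rough standalone model of "compute once, index per call site" (toy percentile code, not the analyzer API):

    #include <algorithm>
    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Toy substitute for a fused quantiles(...) call: one sort, several lookups.
    std::vector<double> fused_quantiles(std::vector<double> data, const std::vector<double> & levels)
    {
        std::sort(data.begin(), data.end());
        std::vector<double> result;
        for (double level : levels)
        {
            size_t index = static_cast<size_t>(level * (data.size() - 1));
            result.push_back(data[index]);
        }
        return result;
    }

    int main()
    {
        std::vector<double> data = {5, 1, 4, 2, 3};
        std::vector<double> levels = {0.0, 0.5, 1.0};

        std::vector<double> fused = fused_quantiles(data, levels);

        // Each original call site i now reads fused[i]; in the rewritten tree this
        // becomes an arrayElement(..., i + 1) call because arrayElement is 1-based.
        assert(fused[0] == 1 && fused[1] == 3 && fused[2] == 5);
        return 0;
    }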
@@ -1,9 +1,11 @@
 #include <Analyzer/Passes/IfConstantConditionPass.h>

-#include <Analyzer/InDepthQueryTreeVisitor.h>
-#include <Analyzer/FunctionNode.h>
 #include <Functions/FunctionFactory.h>

+#include <Analyzer/InDepthQueryTreeVisitor.h>
+#include <Analyzer/ConstantNode.h>
+#include <Analyzer/FunctionNode.h>
+
 namespace DB
 {

@@ -23,11 +25,11 @@ public:
 return;

 auto & first_argument = function_node->getArguments().getNodes()[0];
-auto first_argument_constant_value = first_argument->getConstantValueOrNull();
-if (!first_argument_constant_value)
+const auto * first_argument_constant_node = first_argument->as<ConstantNode>();
+if (!first_argument_constant_node)
 return;

-const auto & condition_value = first_argument_constant_value->getValue();
+const auto & condition_value = first_argument_constant_node->getValue();

 bool condition_boolean_value = false;

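The pass above now detects a constant condition by downcasting the first if() argument to ConstantNode. The simplification it performs is easy to state on a toy tree: when the condition is a known constant, the whole if() collapses to one of its branches. A hedged sketch with invented toy types, not the ClickHouse pass:

    #include <iostream>
    #include <optional>
    #include <string>

    // Toy if(cond, then, else) node whose condition may or may not have folded to a constant.
    struct ToyIfExpr
    {
        std::optional<bool> constant_condition;
        std::string then_branch;
        std::string else_branch;
    };

    // If the condition is constant, return the branch the if() collapses to.
    std::optional<std::string> try_fold_if(const ToyIfExpr & expr)
    {
        if (!expr.constant_condition)
            return std::nullopt; // condition is not a constant, leave the node alone
        return *expr.constant_condition ? expr.then_branch : expr.else_branch;
    }

    int main()
    {
        ToyIfExpr expr{true, "then_value", "else_value"};
        if (auto folded = try_fold_if(expr))
            std::cout << "if(...) folded to: " << *folded << '\n';
        return 0;
    }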
@@ -4,6 +4,7 @@
 #include <AggregateFunctions/IAggregateFunction.h>

 #include <Analyzer/InDepthQueryTreeVisitor.h>
+#include <Analyzer/ConstantNode.h>
 #include <Analyzer/FunctionNode.h>

 namespace DB
@@ -25,11 +26,11 @@ public:
 return;

 auto & first_argument = function_node->getArguments().getNodes()[0];
-auto first_argument_constant_value = first_argument->getConstantValueOrNull();
-if (!first_argument_constant_value)
+auto * first_argument_constant_node = first_argument->as<ConstantNode>();
+if (!first_argument_constant_node)
 return;

-const auto & first_argument_constant_literal = first_argument_constant_value->getValue();
+const auto & first_argument_constant_literal = first_argument_constant_node->getValue();

 if (function_node->getFunctionName() == "count" && !first_argument_constant_literal.isNull())
 {
@@ -1680,9 +1680,6 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, size
 node->getNodeTypeName(),
 node->formatASTForErrorMessage());

-if (node->hasConstantValue())
-return;
-
 auto subquery_context = Context::createCopy(context);

 Settings subquery_settings = context->getSettings();
@@ -1721,12 +1718,7 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, size
 }

 auto constant_value = std::make_shared<ConstantValue>(Null(), std::move(type));
-
-if (query_node)
-query_node->performConstantFolding(std::move(constant_value));
-else if (union_node)
-union_node->performConstantFolding(std::move(constant_value));
-
+node = std::make_shared<ConstantNode>(std::move(constant_value), node);
 return;
 }

@@ -1771,10 +1763,7 @@ void QueryAnalyzer::evaluateScalarSubqueryIfNeeded(QueryTreeNodePtr & node, size
 }

 auto constant_value = std::make_shared<ConstantValue>(std::move(scalar_value), std::move(scalar_type));
-if (query_node)
-query_node->performConstantFolding(std::move(constant_value));
-else if (union_node)
-union_node->performConstantFolding(std::move(constant_value));
+node = std::make_shared<ConstantNode>(std::move(constant_value), node);
 }

 void QueryAnalyzer::mergeWindowWithParentWindow(const QueryTreeNodePtr & window_node, const QueryTreeNodePtr & parent_window_node, IdentifierResolveScope & scope)
|
|||||||
|
|
||||||
void QueryAnalyzer::validateLimitOffsetExpression(QueryTreeNodePtr & expression_node, const String & expression_description, IdentifierResolveScope & scope)
|
void QueryAnalyzer::validateLimitOffsetExpression(QueryTreeNodePtr & expression_node, const String & expression_description, IdentifierResolveScope & scope)
|
||||||
{
|
{
|
||||||
const auto limit_offset_constant_value = expression_node->getConstantValueOrNull();
|
const auto * limit_offset_constant_node = expression_node->as<ConstantNode>();
|
||||||
if (!limit_offset_constant_value || !isNativeNumber(removeNullable(limit_offset_constant_value->getType())))
|
if (!limit_offset_constant_node || !isNativeNumber(removeNullable(limit_offset_constant_node->getResultType())))
|
||||||
throw Exception(ErrorCodes::INVALID_LIMIT_EXPRESSION,
|
throw Exception(ErrorCodes::INVALID_LIMIT_EXPRESSION,
|
||||||
"{} expression must be constant with numeric type. Actual {}. In scope {}",
|
"{} expression must be constant with numeric type. Actual {}. In scope {}",
|
||||||
expression_description,
|
expression_description,
|
||||||
expression_node->formatASTForErrorMessage(),
|
expression_node->formatASTForErrorMessage(),
|
||||||
scope.scope_node->formatASTForErrorMessage());
|
scope.scope_node->formatASTForErrorMessage());
|
||||||
|
|
||||||
Field converted = convertFieldToType(limit_offset_constant_value->getValue(), DataTypeUInt64());
|
Field converted = convertFieldToType(limit_offset_constant_node->getValue(), DataTypeUInt64());
|
||||||
if (converted.isNull())
|
if (converted.isNull())
|
||||||
throw Exception(ErrorCodes::INVALID_LIMIT_EXPRESSION,
|
throw Exception(ErrorCodes::INVALID_LIMIT_EXPRESSION,
|
||||||
"{} numeric constant expression is not representable as UInt64",
|
"{} numeric constant expression is not representable as UInt64",
|
||||||
@ -2465,23 +2454,19 @@ QueryTreeNodePtr QueryAnalyzer::tryResolveIdentifierFromTableExpression(const Id
|
|||||||
result_column = it->second;
|
result_column = it->second;
|
||||||
}
|
}
|
||||||
|
|
||||||
QueryTreeNodePtr result_expression;
|
QueryTreeNodePtr result_expression = result_column;
|
||||||
bool clone_is_needed = true;
|
bool clone_is_needed = true;
|
||||||
|
|
||||||
String table_expression_source = table_expression_data.table_expression_description;
|
String table_expression_source = table_expression_data.table_expression_description;
|
||||||
if (!table_expression_data.table_expression_name.empty())
|
if (!table_expression_data.table_expression_name.empty())
|
||||||
table_expression_source += " with name " + table_expression_data.table_expression_name;
|
table_expression_source += " with name " + table_expression_data.table_expression_name;
|
||||||
|
|
||||||
if (!match_full_identifier && compound_identifier)
|
if (result_column && !match_full_identifier && compound_identifier)
|
||||||
{
|
{
|
||||||
size_t identifier_bind_size = identifier_column_qualifier_parts + 1;
|
size_t identifier_bind_size = identifier_column_qualifier_parts + 1;
|
||||||
result_expression = tryResolveIdentifierFromCompoundExpression(identifier_lookup.identifier, identifier_bind_size, result_column, table_expression_source, scope);
|
result_expression = tryResolveIdentifierFromCompoundExpression(identifier_lookup.identifier, identifier_bind_size, result_column, table_expression_source, scope);
|
||||||
clone_is_needed = false;
|
clone_is_needed = false;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
result_expression = result_column;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!result_expression)
|
if (!result_expression)
|
||||||
{
|
{
|
||||||
@ -2882,9 +2867,9 @@ IdentifierResolveResult QueryAnalyzer::tryResolveIdentifierInParentScopes(const
|
|||||||
{
|
{
|
||||||
return lookup_result;
|
return lookup_result;
|
||||||
}
|
}
|
||||||
else if (const auto constant_value = resolved_identifier->getConstantValueOrNull())
|
else if (resolved_identifier->as<ConstantNode>())
|
||||||
{
|
{
|
||||||
lookup_result.resolved_identifier = std::make_shared<ConstantNode>(constant_value);
|
lookup_result.resolved_identifier = resolved_identifier;
|
||||||
return lookup_result;
|
return lookup_result;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -3790,14 +3775,14 @@ ProjectionName QueryAnalyzer::resolveWindow(QueryTreeNodePtr & node, IdentifierR
|
|||||||
false /*allow_lambda_expression*/,
|
false /*allow_lambda_expression*/,
|
||||||
false /*allow_table_expression*/);
|
false /*allow_table_expression*/);
|
||||||
|
|
||||||
const auto window_frame_begin_constant_value = window_node.getFrameBeginOffsetNode()->getConstantValueOrNull();
|
const auto * window_frame_begin_constant_node = window_node.getFrameBeginOffsetNode()->as<ConstantNode>();
|
||||||
if (!window_frame_begin_constant_value || !isNativeNumber(removeNullable(window_frame_begin_constant_value->getType())))
|
if (!window_frame_begin_constant_node || !isNativeNumber(removeNullable(window_frame_begin_constant_node->getResultType())))
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||||
"Window frame begin OFFSET expression must be constant with numeric type. Actual {}. In scope {}",
|
"Window frame begin OFFSET expression must be constant with numeric type. Actual {}. In scope {}",
|
||||||
window_node.getFrameBeginOffsetNode()->formatASTForErrorMessage(),
|
window_node.getFrameBeginOffsetNode()->formatASTForErrorMessage(),
|
||||||
scope.scope_node->formatASTForErrorMessage());
|
scope.scope_node->formatASTForErrorMessage());
|
||||||
|
|
||||||
window_node.getWindowFrame().begin_offset = window_frame_begin_constant_value->getValue();
|
window_node.getWindowFrame().begin_offset = window_frame_begin_constant_node->getValue();
|
||||||
if (frame_begin_offset_projection_names.size() != 1)
|
if (frame_begin_offset_projection_names.size() != 1)
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||||
"Window FRAME begin offset expected 1 projection name. Actual {}",
|
"Window FRAME begin offset expected 1 projection name. Actual {}",
|
||||||
@ -3811,14 +3796,14 @@ ProjectionName QueryAnalyzer::resolveWindow(QueryTreeNodePtr & node, IdentifierR
|
|||||||
false /*allow_lambda_expression*/,
|
false /*allow_lambda_expression*/,
|
||||||
false /*allow_table_expression*/);
|
false /*allow_table_expression*/);
|
||||||
|
|
||||||
const auto window_frame_end_constant_value = window_node.getFrameEndOffsetNode()->getConstantValueOrNull();
|
const auto * window_frame_end_constant_node = window_node.getFrameEndOffsetNode()->as<ConstantNode>();
|
||||||
if (!window_frame_end_constant_value || !isNativeNumber(removeNullable(window_frame_end_constant_value->getType())))
|
if (!window_frame_end_constant_node || !isNativeNumber(removeNullable(window_frame_end_constant_node->getResultType())))
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||||
"Window frame begin OFFSET expression must be constant with numeric type. Actual {}. In scope {}",
|
"Window frame begin OFFSET expression must be constant with numeric type. Actual {}. In scope {}",
|
||||||
window_node.getFrameEndOffsetNode()->formatASTForErrorMessage(),
|
window_node.getFrameEndOffsetNode()->formatASTForErrorMessage(),
|
||||||
scope.scope_node->formatASTForErrorMessage());
|
scope.scope_node->formatASTForErrorMessage());
|
||||||
|
|
||||||
window_node.getWindowFrame().end_offset = window_frame_end_constant_value->getValue();
|
window_node.getWindowFrame().end_offset = window_frame_end_constant_node->getValue();
|
||||||
if (frame_end_offset_projection_names.size() != 1)
|
if (frame_end_offset_projection_names.size() != 1)
|
||||||
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
throw Exception(ErrorCodes::LOGICAL_ERROR,
|
||||||
"Window FRAME begin offset expected 1 projection name. Actual {}",
|
"Window FRAME begin offset expected 1 projection name. Actual {}",
|
||||||
@ -3980,16 +3965,15 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
|
|||||||
|
|
||||||
for (auto & parameter_node : parameters_nodes)
|
for (auto & parameter_node : parameters_nodes)
|
||||||
{
|
{
|
||||||
auto constant_value = parameter_node->getConstantValueOrNull();
|
const auto * constant_node = parameter_node->as<ConstantNode>();
|
||||||
|
if (!constant_node)
|
||||||
if (!constant_value)
|
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
throw Exception(ErrorCodes::BAD_ARGUMENTS,
|
||||||
"Parameter for function {} expected to have constant value. Actual {}. In scope {}",
|
"Parameter for function {} expected to have constant value. Actual {}. In scope {}",
|
||||||
function_name,
|
function_name,
|
||||||
parameter_node->formatASTForErrorMessage(),
|
parameter_node->formatASTForErrorMessage(),
|
||||||
scope.scope_node->formatASTForErrorMessage());
|
scope.scope_node->formatASTForErrorMessage());
|
||||||
|
|
||||||
parameters.push_back(constant_value->getValue());
|
parameters.push_back(constant_node->getValue());
|
||||||
}
|
}
|
||||||
|
|
||||||
//// If function node is not window function try to lookup function node name as lambda identifier.
|
//// If function node is not window function try to lookup function node name as lambda identifier.
|
||||||
@@ -4063,6 +4047,7 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
 in_subquery->getJoinTree() = exists_subquery_argument;
 in_subquery->getLimit() = std::make_shared<ConstantNode>(1UL, constant_data_type);
 in_subquery->resolveProjectionColumns({NameAndTypePair("1", constant_data_type)});
+in_subquery->setIsSubquery(true);

 function_node_ptr = std::make_shared<FunctionNode>("in");
 function_node_ptr->getArguments().getNodes() = {std::make_shared<ConstantNode>(1UL, constant_data_type), in_subquery};
@@ -4145,14 +4130,12 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
 auto & function_argument = function_arguments[function_argument_index];

 ColumnWithTypeAndName argument_column;
-bool argument_is_lambda = false;

 /** If function argument is lambda, save lambda argument index and initialize argument type as DataTypeFunction
 * where function argument types are initialized with empty array of lambda arguments size.
 */
 if (const auto * lambda_node = function_argument->as<const LambdaNode>())
 {
-argument_is_lambda = true;
 size_t lambda_arguments_size = lambda_node->getArguments().getNodes().size();
 argument_column.type = std::make_shared<DataTypeFunction>(DataTypes(lambda_arguments_size, nullptr), nullptr);
 function_lambda_arguments_indexes.push_back(function_argument_index);
@@ -4175,11 +4158,11 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
 function_node.getFunctionName(),
 scope.scope_node->formatASTForErrorMessage());

-const auto constant_value = function_argument->getConstantValueOrNull();
-if (!argument_is_lambda && constant_value)
+const auto * constant_node = function_argument->as<ConstantNode>();
+if (constant_node)
 {
-argument_column.column = constant_value->getType()->createColumnConst(1, constant_value->getValue());
-argument_column.type = constant_value->getType();
+argument_column.column = constant_node->getResultType()->createColumnConst(1, constant_node->getValue());
+argument_column.type = constant_node->getResultType();
 }
 else
 {
@@ -4495,25 +4478,31 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
 *
 * Example: SELECT * FROM test_table LIMIT 1 IN 1;
 */
-if (is_special_function_in &&
-function_arguments.at(0)->hasConstantValue() &&
-function_arguments.at(1)->hasConstantValue())
+if (is_special_function_in)
 {
-const auto & first_argument_constant_value = function_arguments[0]->getConstantValue();
-const auto & second_argument_constant_value = function_arguments[1]->getConstantValue();
+const auto * first_argument_constant_node = function_arguments[0]->as<ConstantNode>();
+const auto * second_argument_constant_node = function_arguments[1]->as<ConstantNode>();

-const auto & first_argument_constant_type = first_argument_constant_value.getType();
-const auto & second_argument_constant_literal = second_argument_constant_value.getValue();
-const auto & second_argument_constant_type = second_argument_constant_value.getType();
+if (first_argument_constant_node && second_argument_constant_node)
+{
+const auto & first_argument_constant_type = first_argument_constant_node->getResultType();
+const auto & second_argument_constant_literal = second_argument_constant_node->getValue();
+const auto & second_argument_constant_type = second_argument_constant_node->getResultType();

-auto set = makeSetForConstantValue(first_argument_constant_type, second_argument_constant_literal, second_argument_constant_type, scope.context->getSettingsRef());
+auto set = makeSetForConstantValue(first_argument_constant_type,
+second_argument_constant_literal,
+second_argument_constant_type,
+scope.context->getSettingsRef());

 /// Create constant set column for constant folding

 auto column_set = ColumnSet::create(1, std::move(set));
 argument_columns[1].column = ColumnConst::create(std::move(column_set), 1);
+}
 }

+std::shared_ptr<ConstantValue> constant_value;
+
 DataTypePtr result_type;

 try
@@ -4544,10 +4533,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi
 if (column && isColumnConst(*column))
 {
 /// Replace function node with result constant node
-Field constant_value;
-column->get(0, constant_value);
-
-function_node.performConstantFolding(std::make_shared<ConstantValue>(std::move(constant_value), result_type));
+Field column_constant_value;
+column->get(0, column_constant_value);
+constant_value = std::make_shared<ConstantValue>(std::move(column_constant_value), result_type);
 }
 }
 }
@@ -4559,6 +4547,9 @@ ProjectionNames QueryAnalyzer::resolveFunction(QueryTreeNodePtr & node, Identifi

 function_node.resolveAsFunction(std::move(function), std::move(result_type));

+if (constant_value)
+node = std::make_shared<ConstantNode>(std::move(constant_value), node);
+
 return result_projection_names;
 }

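The change above no longer mutates the function node when its evaluated result turns out to be a constant column; it records the value and, after resolveAsFunction, replaces the node in the tree with a ConstantNode. The underlying idea, folding a function whose inputs are all known, in a toy form:

    #include <iostream>
    #include <optional>
    #include <vector>

    // Toy fold: a '+' node over arguments that may or may not be known constants.
    std::optional<int> try_fold_plus(const std::vector<std::optional<int>> & arguments)
    {
        int sum = 0;
        for (const auto & argument : arguments)
        {
            if (!argument)
                return std::nullopt; // at least one non-constant argument: keep the function node
            sum += *argument;
        }
        return sum; // all arguments constant: the whole node can become a constant
    }

    int main()
    {
        std::vector<std::optional<int>> arguments = {2, 3, 4};
        if (auto folded = try_fold_plus(arguments))
            std::cout << "plus(...) folded to " << *folded << '\n';
        return 0;
    }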
@@ -4979,8 +4970,8 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
 {
 fill_from_expression_projection_names = resolveExpressionNode(sort_node.getFillFrom(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);

-const auto constant_value = sort_node.getFillFrom()->getConstantValueOrNull();
-if (!constant_value || !isColumnedAsNumber(constant_value->getType()))
+const auto * constant_node = sort_node.getFillFrom()->as<ConstantNode>();
+if (!constant_node || !isColumnedAsNumber(constant_node->getResultType()))
 throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
 "Sort FILL FROM expression must be constant with numeric type. Actual {}. In scope {}",
 sort_node.getFillFrom()->formatASTForErrorMessage(),
@@ -4997,8 +4988,8 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
 {
 fill_to_expression_projection_names = resolveExpressionNode(sort_node.getFillTo(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);

-const auto constant_value = sort_node.getFillTo()->getConstantValueOrNull();
-if (!constant_value || !isColumnedAsNumber(constant_value->getType()))
+const auto * constant_node = sort_node.getFillTo()->as<ConstantNode>();
+if (!constant_node || !isColumnedAsNumber(constant_node->getResultType()))
 throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
 "Sort FILL TO expression must be constant with numeric type. Actual {}. In scope {}",
 sort_node.getFillFrom()->formatASTForErrorMessage(),
@@ -5015,15 +5006,15 @@ ProjectionNames QueryAnalyzer::resolveSortNodeList(QueryTreeNodePtr & sort_node_
 {
 fill_step_expression_projection_names = resolveExpressionNode(sort_node.getFillStep(), scope, false /*allow_lambda_expression*/, false /*allow_table_expression*/);

-const auto constant_value = sort_node.getFillStep()->getConstantValueOrNull();
-if (!constant_value)
+const auto * constant_node = sort_node.getFillStep()->as<ConstantNode>();
+if (!constant_node)
 throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
 "Sort FILL STEP expression must be constant with numeric or interval type. Actual {}. In scope {}",
 sort_node.getFillStep()->formatASTForErrorMessage(),
 scope.scope_node->formatASTForErrorMessage());

-bool is_number = isColumnedAsNumber(constant_value->getType());
-bool is_interval = WhichDataType(constant_value->getType()).isInterval();
+bool is_number = isColumnedAsNumber(constant_node->getResultType());
+bool is_interval = WhichDataType(constant_node->getResultType()).isInterval();
 if (!is_number && !is_interval)
 throw Exception(ErrorCodes::INVALID_WITH_FILL_EXPRESSION,
 "Sort FILL STEP expression must be constant with numeric or interval type. Actual {}. In scope {}",
@@ -5455,25 +5446,7 @@ void QueryAnalyzer::resolveQueryJoinTreeNode(QueryTreeNodePtr & join_tree_node,
 }
 }

-/// TODO: Special functions that can take query
-/// TODO: Support qualified matchers for table function
-
-for (auto & argument_node : table_function_node.getArguments().getNodes())
-{
-if (argument_node->getNodeType() == QueryTreeNodeType::MATCHER)
-{
-throw Exception(ErrorCodes::BAD_ARGUMENTS,
-"Matcher as table function argument is not supported {}. In scope {}",
-join_tree_node->formatASTForErrorMessage(),
-scope.scope_node->formatASTForErrorMessage());
-}
-
-auto * function_node = argument_node->as<FunctionNode>();
-if (function_node && table_function_factory.hasNameOrAlias(function_node->getFunctionName()))
-continue;
-
-resolveExpressionNode(argument_node, scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);
-}
+resolveExpressionNodeList(table_function_node.getArgumentsNode(), scope, false /*allow_lambda_expression*/, true /*allow_table_expression*/);

 auto table_function_ast = table_function_node.toAST();
 table_function_ptr->parseArguments(table_function_ast, scope_context);
@@ -6151,7 +6124,7 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
 auto & grouping_set_keys = node->as<ListNode &>();
 for (auto & grouping_set_key : grouping_set_keys.getNodes())
 {
-if (grouping_set_key->hasConstantValue())
+if (grouping_set_key->as<ConstantNode>())
 continue;

 group_by_keys_nodes.push_back(grouping_set_key);
@@ -6159,7 +6132,7 @@ void QueryAnalyzer::resolveQuery(const QueryTreeNodePtr & query_node, Identifier
 }
 else
 {
-if (node->hasConstantValue())
+if (node->as<ConstantNode>())
 continue;

 group_by_keys_nodes.push_back(node);
@@ -11,6 +11,7 @@
 #include <Interpreters/Context.h>

 #include <Analyzer/InDepthQueryTreeVisitor.h>
+#include <Analyzer/ConstantNode.h>
 #include <Analyzer/FunctionNode.h>

 namespace DB
@@ -47,11 +48,11 @@ public:
 if (function_node_arguments_nodes.size() != 2)
 return;

-auto constant_value = function_node_arguments_nodes[0]->getConstantValueOrNull();
-if (!constant_value)
+const auto * constant_node = function_node_arguments_nodes[0]->as<ConstantNode>();
+if (!constant_node)
 return;

-const auto & constant_value_literal = constant_value->getValue();
+const auto & constant_value_literal = constant_node->getValue();
 if (!isInt64OrUInt64FieldType(constant_value_literal.getType()))
 return;

@@ -61,7 +62,7 @@ public:
 function_node_arguments_nodes[0] = std::move(function_node_arguments_nodes[1]);
 function_node_arguments_nodes.resize(1);

-resolveAggregateFunctionNode(*function_node, "countIf");
+resolveAsCountIfAggregateFunction(*function_node, function_node_arguments_nodes[0]->getResultType());
 return;
 }

@@ -80,14 +81,14 @@ public:
 if (nested_if_function_arguments_nodes.size() != 3)
 return;

-auto if_true_condition_constant_value = nested_if_function_arguments_nodes[1]->getConstantValueOrNull();
-auto if_false_condition_constant_value = nested_if_function_arguments_nodes[2]->getConstantValueOrNull();
+const auto * if_true_condition_constant_node = nested_if_function_arguments_nodes[1]->as<ConstantNode>();
+const auto * if_false_condition_constant_node = nested_if_function_arguments_nodes[2]->as<ConstantNode>();

-if (!if_true_condition_constant_value || !if_false_condition_constant_value)
+if (!if_true_condition_constant_node || !if_false_condition_constant_node)
 return;

-const auto & if_true_condition_constant_value_literal = if_true_condition_constant_value->getValue();
-const auto & if_false_condition_constant_value_literal = if_false_condition_constant_value->getValue();
+const auto & if_true_condition_constant_value_literal = if_true_condition_constant_node->getValue();
+const auto & if_false_condition_constant_value_literal = if_false_condition_constant_node->getValue();

 if (!isInt64OrUInt64FieldType(if_true_condition_constant_value_literal.getType()) ||
 !isInt64OrUInt64FieldType(if_false_condition_constant_value_literal.getType()))
@@ -102,15 +103,16 @@ public:
 function_node_arguments_nodes[0] = std::move(nested_if_function_arguments_nodes[0]);
 function_node_arguments_nodes.resize(1);

-resolveAggregateFunctionNode(*function_node, "countIf");
+resolveAsCountIfAggregateFunction(*function_node, function_node_arguments_nodes[0]->getResultType());
 return;
 }

 /// Rewrite `sum(if(cond, 0, 1))` into `countIf(not(cond))`.
 if (if_true_condition_value == 0 && if_false_condition_value == 1)
 {
-auto condition_result_type = nested_if_function_arguments_nodes[0]->getResultType();
 DataTypePtr not_function_result_type = std::make_shared<DataTypeUInt8>();

+const auto & condition_result_type = nested_if_function_arguments_nodes[0]->getResultType();
 if (condition_result_type->isNullable())
 not_function_result_type = makeNullable(not_function_result_type);

@@ -123,23 +125,21 @@ public:
 function_node_arguments_nodes[0] = std::move(not_function);
 function_node_arguments_nodes.resize(1);

-resolveAggregateFunctionNode(*function_node, "countIf");
+resolveAsCountIfAggregateFunction(*function_node, function_node_arguments_nodes[0]->getResultType());
 return;
 }
 }

 private:
-static inline void resolveAggregateFunctionNode(FunctionNode & function_node, const String & aggregate_function_name)
+static inline void resolveAsCountIfAggregateFunction(FunctionNode & function_node, const DataTypePtr & argument_type)
 {
-auto function_result_type = function_node.getResultType();
-auto function_aggregate_function = function_node.getAggregateFunction();
-
 AggregateFunctionProperties properties;
-auto aggregate_function = AggregateFunctionFactory::instance().get(aggregate_function_name,
-function_aggregate_function->getArgumentTypes(),
-function_aggregate_function->getParameters(),
+auto aggregate_function = AggregateFunctionFactory::instance().get("countIf",
+{argument_type},
+function_node.getAggregateFunction()->getParameters(),
 properties);

+auto function_result_type = function_node.getResultType();
 function_node.resolveAsAggregateFunction(std::move(aggregate_function), std::move(function_result_type));
 }

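Both rewrites performed by this pass depend on sum(if(cond, 1, 0)) counting exactly the rows where cond holds, and sum(if(cond, 0, 1)) counting the rows where it does not. A quick standalone check of that equivalence:

    #include <cassert>
    #include <vector>

    int main()
    {
        std::vector<bool> cond = {true, false, true, true, false};

        int sum_if_1_0 = 0;      // sum(if(cond, 1, 0))
        int sum_if_0_1 = 0;      // sum(if(cond, 0, 1))
        int count_if = 0;        // countIf(cond)
        int count_if_not = 0;    // countIf(not(cond))

        for (bool c : cond)
        {
            sum_if_1_0 += c ? 1 : 0;
            sum_if_0_1 += c ? 0 : 1;
            count_if += c ? 1 : 0;
            count_if_not += c ? 0 : 1;
        }

        assert(sum_if_1_0 == count_if);
        assert(sum_if_0_1 == count_if_not);
        return 0;
    }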
@@ -2,9 +2,13 @@

 #include <Functions/IFunction.h>

+#include <AggregateFunctions/AggregateFunctionFactory.h>
+#include <AggregateFunctions/IAggregateFunction.h>
+
 #include <Analyzer/InDepthQueryTreeVisitor.h>
 #include <Analyzer/FunctionNode.h>

+
 namespace DB
 {

@@ -30,7 +34,9 @@ public:
 if (!function_node || !function_node->isAggregateFunction() || !isUniqFunction(function_node->getFunctionName()))
 return;

+bool replaced_argument = false;
 auto & uniq_function_arguments_nodes = function_node->getArguments().getNodes();
+
 for (auto & uniq_function_argument_node : uniq_function_arguments_nodes)
 {
 auto * uniq_function_argument_node_typed = uniq_function_argument_node->as<FunctionNode>();
@@ -49,7 +55,28 @@ public:

 /// Replace injective function with its single argument
 uniq_function_argument_node = uniq_function_argument_node_argument_nodes[0];
+replaced_argument = true;
 }
+
+if (!replaced_argument)
+return;
+
+const auto & function_node_argument_nodes = function_node->getArguments().getNodes();
+
+DataTypes argument_types;
+argument_types.reserve(function_node_argument_nodes.size());
+
+for (const auto & function_node_argument : function_node_argument_nodes)
+argument_types.emplace_back(function_node_argument->getResultType());
+
+AggregateFunctionProperties properties;
+auto aggregate_function = AggregateFunctionFactory::instance().get(function_node->getFunctionName(),
+argument_types,
+function_node->getAggregateFunction()->getParameters(),
+properties);
+
+auto function_result_type = function_node->getResultType();
+function_node->resolveAsAggregateFunction(std::move(aggregate_function), std::move(function_result_type));
 }
 };

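Dropping an injective wrapper inside a uniq*() aggregate is safe because an injective function never maps two distinct inputs to the same output, so the distinct count is unchanged; the new code above then re-resolves the aggregate because the argument types may have changed after the rewrite. A standalone check of the counting claim with an arbitrarily chosen injective function over ints:

    #include <cassert>
    #include <unordered_set>
    #include <vector>

    int main()
    {
        std::vector<int> values = {1, 2, 2, 3, 1};

        std::unordered_set<int> distinct_raw(values.begin(), values.end());

        // x -> 2 * x + 1 is injective over these values, so the distinct counts match.
        std::unordered_set<int> distinct_mapped;
        for (int v : values)
            distinct_mapped.insert(2 * v + 1);

        assert(distinct_raw.size() == distinct_mapped.size());
        return 0;
    }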
@@ -71,12 +71,6 @@ void QueryNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
 if (!cte_name.empty())
 buffer << ", cte_name: " << cte_name;

-if (constant_value)
-{
-buffer << ", constant_value: " << constant_value->getValue().dump();
-buffer << ", constant_value_type: " << constant_value->getType()->getName();
-}
-
 if (hasWith())
 {
 buffer << '\n' << std::string(indent + 2, ' ') << "WITH\n";
@@ -185,13 +179,6 @@ bool QueryNode::isEqualImpl(const IQueryTreeNode & rhs) const
 {
 const auto & rhs_typed = assert_cast<const QueryNode &>(rhs);

-if (constant_value && rhs_typed.constant_value && *constant_value != *rhs_typed.constant_value)
-return false;
-else if (constant_value && !rhs_typed.constant_value)
-return false;
-else if (!constant_value && rhs_typed.constant_value)
-return false;
-
 return is_subquery == rhs_typed.is_subquery &&
 is_cte == rhs_typed.is_cte &&
 cte_name == rhs_typed.cte_name &&
@@ -231,17 +218,6 @@ void QueryNode::updateTreeHashImpl(HashState & state) const
 state.update(is_group_by_with_cube);
 state.update(is_group_by_with_grouping_sets);
 state.update(is_group_by_all);
-
-if (constant_value)
-{
-auto constant_dump = applyVisitor(FieldVisitorToString(), constant_value->getValue());
-state.update(constant_dump.size());
-state.update(constant_dump);
-
-auto constant_value_type_name = constant_value->getType()->getName();
-state.update(constant_value_type_name.size());
-state.update(constant_value_type_name);
-}
 }

 QueryTreeNodePtr QueryNode::cloneImpl() const
@@ -259,7 +235,6 @@ QueryTreeNodePtr QueryNode::cloneImpl() const
 result_query_node->is_group_by_all = is_group_by_all;
 result_query_node->cte_name = cte_name;
 result_query_node->projection_columns = projection_columns;
-result_query_node->constant_value = constant_value;

 return result_query_node;
 }
@@ -13,11 +13,6 @@
 namespace DB
 {

-namespace ErrorCodes
-{
-extern const int UNSUPPORTED_METHOD;
-}
-
 /** Query node represents query in query tree.
 *
 * Example: SELECT * FROM test_table WHERE id == 0;
@@ -553,25 +548,6 @@ public:
 return QueryTreeNodeType::QUERY;
 }

-DataTypePtr getResultType() const override
-{
-if (constant_value)
-return constant_value->getType();
-
-throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Method getResultType is not supported for non scalar query node");
-}
-
-/// Perform constant folding for scalar subquery node
-void performConstantFolding(ConstantValuePtr constant_folded_value)
-{
-constant_value = std::move(constant_folded_value);
-}
-
-ConstantValuePtr getConstantValueOrNull() const override
-{
-return constant_value;
-}
-
 void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override;

 protected:
@@ -596,7 +572,6 @@ private:

 std::string cte_name;
 NamesAndTypes projection_columns;
-ConstantValuePtr constant_value;
 SettingsChanges settings_changes;

 static constexpr size_t with_child_index = 0;
@ -102,12 +102,6 @@ void UnionNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
|
|||||||
if (!cte_name.empty())
|
if (!cte_name.empty())
|
||||||
buffer << ", cte_name: " << cte_name;
|
buffer << ", cte_name: " << cte_name;
|
||||||
|
|
||||||
if (constant_value)
|
|
||||||
{
|
|
||||||
buffer << ", constant_value: " << constant_value->getValue().dump();
|
|
||||||
buffer << ", constant_value_type: " << constant_value->getType()->getName();
|
|
||||||
}
|
|
||||||
|
|
||||||
buffer << ", union_mode: " << toString(union_mode);
|
buffer << ", union_mode: " << toString(union_mode);
|
||||||
|
|
||||||
buffer << '\n' << std::string(indent + 2, ' ') << "QUERIES\n";
|
buffer << '\n' << std::string(indent + 2, ' ') << "QUERIES\n";
|
||||||
@ -117,12 +111,6 @@ void UnionNode::dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, s
|
|||||||
bool UnionNode::isEqualImpl(const IQueryTreeNode & rhs) const
|
bool UnionNode::isEqualImpl(const IQueryTreeNode & rhs) const
|
||||||
{
|
{
|
||||||
const auto & rhs_typed = assert_cast<const UnionNode &>(rhs);
|
const auto & rhs_typed = assert_cast<const UnionNode &>(rhs);
|
||||||
if (constant_value && rhs_typed.constant_value && *constant_value != *rhs_typed.constant_value)
|
|
||||||
return false;
|
|
||||||
else if (constant_value && !rhs_typed.constant_value)
|
|
||||||
return false;
|
|
||||||
else if (!constant_value && rhs_typed.constant_value)
|
|
||||||
return false;
|
|
||||||
|
|
||||||
return is_subquery == rhs_typed.is_subquery && is_cte == rhs_typed.is_cte && cte_name == rhs_typed.cte_name &&
|
return is_subquery == rhs_typed.is_subquery && is_cte == rhs_typed.is_cte && cte_name == rhs_typed.cte_name &&
|
||||||
union_mode == rhs_typed.union_mode;
|
union_mode == rhs_typed.union_mode;
|
||||||
@ -137,17 +125,6 @@ void UnionNode::updateTreeHashImpl(HashState & state) const
|
|||||||
state.update(cte_name);
|
state.update(cte_name);
|
||||||
|
|
||||||
state.update(static_cast<size_t>(union_mode));
|
state.update(static_cast<size_t>(union_mode));
|
||||||
|
|
||||||
if (constant_value)
|
|
||||||
{
|
|
||||||
auto constant_dump = applyVisitor(FieldVisitorToString(), constant_value->getValue());
|
|
||||||
state.update(constant_dump.size());
|
|
||||||
state.update(constant_dump);
|
|
||||||
|
|
||||||
auto constant_value_type_name = constant_value->getType()->getName();
|
|
||||||
state.update(constant_value_type_name.size());
|
|
||||||
state.update(constant_value_type_name);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
QueryTreeNodePtr UnionNode::cloneImpl() const
|
QueryTreeNodePtr UnionNode::cloneImpl() const
|
||||||
@ -157,7 +134,6 @@ QueryTreeNodePtr UnionNode::cloneImpl() const
|
|||||||
result_union_node->is_subquery = is_subquery;
|
result_union_node->is_subquery = is_subquery;
|
||||||
result_union_node->is_cte = is_cte;
|
result_union_node->is_cte = is_cte;
|
||||||
result_union_node->cte_name = cte_name;
|
result_union_node->cte_name = cte_name;
|
||||||
result_union_node->constant_value = constant_value;
|
|
||||||
|
|
||||||
return result_union_node;
|
return result_union_node;
|
||||||
}
|
}
|
||||||
@@ -13,11 +13,6 @@
 namespace DB
 {
 
-namespace ErrorCodes
-{
-    extern const int UNSUPPORTED_METHOD;
-}
-
 /** Union node represents union of queries in query tree.
   * Union node must be initialized with normalized union mode.
   *
@@ -119,25 +114,6 @@ public:
         return QueryTreeNodeType::UNION;
     }
 
-    DataTypePtr getResultType() const override
-    {
-        if (constant_value)
-            return constant_value->getType();
-
-        throw Exception(ErrorCodes::UNSUPPORTED_METHOD, "Method getResultType is not supported for non scalar union node");
-    }
-
-    /// Perform constant folding for scalar union node
-    void performConstantFolding(ConstantValuePtr constant_folded_value)
-    {
-        constant_value = std::move(constant_folded_value);
-    }
-
-    ConstantValuePtr getConstantValueOrNull() const override
-    {
-        return constant_value;
-    }
-
     void dumpTreeImpl(WriteBuffer & buffer, FormatState & format_state, size_t indent) const override;
 
 protected:
@@ -154,7 +130,6 @@ private:
     bool is_cte = false;
     std::string cte_name;
    SelectUnionMode union_mode;
-    ConstantValuePtr constant_value;
 
     static constexpr size_t queries_child_index = 0;
     static constexpr size_t children_size = queries_child_index + 1;
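The four hunks above strip the scalar-subquery folding state (constant_value) out of both QueryNode and UnionNode: cloning, equality, hashing and dumping no longer touch it, and the getResultType / performConstantFolding / getConstantValueOrNull trio disappears from both headers. For orientation, here is a minimal sketch of how that removed interface behaved, using only the methods and semantics visible in the deleted lines; query_node and folded_value are assumed to exist, and this is not the replacement mechanism:

// Sketch of the interface deleted above (illustration only).
// `query_node` is a QueryNode (or UnionNode) pointer, `folded_value` a ConstantValuePtr.
query_node->performConstantFolding(folded_value);                  // store the scalar subquery result on the node
DataTypePtr scalar_type = query_node->getResultType();             // now yields folded_value->getType(); before folding it threw UNSUPPORTED_METHOD
ConstantValuePtr constant = query_node->getConstantValueOrNull();  // nullptr until folding has happened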
@@ -166,7 +166,8 @@ void BackupWriterS3::copyObjectImpl(
 
     auto outcome = client->CopyObject(request);
 
-    if (!outcome.IsSuccess() && outcome.GetError().GetExceptionName() == "EntityTooLarge")
+    if (!outcome.IsSuccess() && (outcome.GetError().GetExceptionName() == "EntityTooLarge"
+        || outcome.GetError().GetExceptionName() == "InvalidRequest"))
     { // Can't come here with MinIO, MinIO allows single part upload for large objects.
         copyObjectMultipartImpl(src_bucket, src_key, dst_bucket, dst_key, head, metadata);
         return;
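The copyObjectImpl hunk above makes the multipart fallback trigger on an "InvalidRequest" error as well as "EntityTooLarge". A hedged sketch of the same check factored into a predicate; the outcome type is the one CopyObject already returns, while the helper name is made up for illustration:

#include <aws/s3/S3Client.h>

// Returns true when a failed CopyObject should be retried as a multipart copy,
// mirroring the condition in the hunk above.
static bool shouldFallBackToMultipartCopy(const Aws::S3::Model::CopyObjectOutcome & outcome)
{
    if (outcome.IsSuccess())
        return false;
    const auto & exception_name = outcome.GetError().GetExceptionName();
    return exception_name == "EntityTooLarge" || exception_name == "InvalidRequest";
}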
@@ -8,6 +8,7 @@
 #include <Common/StringUtils/StringUtils.h>
 #include <Common/hex.h>
 #include <Common/quoteString.h>
+#include <Common/XMLUtils.h>
 #include <Interpreters/Context.h>
 #include <IO/Archives/IArchiveReader.h>
 #include <IO/Archives/IArchiveWriter.h>
@@ -22,6 +23,7 @@
 #include <IO/WriteHelpers.h>
 #include <IO/copyData.h>
 #include <Poco/Util/XMLConfiguration.h>
+#include <Poco/DOM/DOMParser.h>
 
 
 namespace DB
@@ -352,8 +354,11 @@ void BackupImpl::writeBackupMetadata()
     increaseUncompressedSize(str.size());
 }
 
+
 void BackupImpl::readBackupMetadata()
 {
+    using namespace XMLUtils;
+
     std::unique_ptr<ReadBuffer> in;
     if (use_archives)
     {
@@ -372,40 +377,39 @@ void BackupImpl::readBackupMetadata()
     String str;
     readStringUntilEOF(str, *in);
     increaseUncompressedSize(str.size());
-    std::istringstream stream(str); // STYLE_CHECK_ALLOW_STD_STRING_STREAM
-    Poco::AutoPtr<Poco::Util::XMLConfiguration> config{new Poco::Util::XMLConfiguration()};
-    config->load(stream);
+    Poco::XML::DOMParser dom_parser;
+    Poco::AutoPtr<Poco::XML::Document> config = dom_parser.parseMemory(str.data(), str.size());
+    const Poco::XML::Node * config_root = getRootNode(config);
 
-    version = config->getInt("version");
+    version = getInt(config_root, "version");
     if ((version < INITIAL_BACKUP_VERSION) || (version > CURRENT_BACKUP_VERSION))
         throw Exception(
             ErrorCodes::BACKUP_VERSION_NOT_SUPPORTED, "Backup {}: Version {} is not supported", backup_name_for_logging, version);
 
-    timestamp = parse<LocalDateTime>(config->getString("timestamp")).to_time_t();
-    uuid = parse<UUID>(config->getString("uuid"));
+    timestamp = parse<::LocalDateTime>(getString(config_root, "timestamp")).to_time_t();
+    uuid = parse<UUID>(getString(config_root, "uuid"));
 
-    if (config->has("base_backup") && !base_backup_info)
-        base_backup_info = BackupInfo::fromString(config->getString("base_backup"));
+    if (config_root->getNodeByPath("base_backup") && !base_backup_info)
+        base_backup_info = BackupInfo::fromString(getString(config_root, "base_backup"));
 
-    if (config->has("base_backup_uuid"))
-        base_backup_uuid = parse<UUID>(config->getString("base_backup_uuid"));
+    if (config_root->getNodeByPath("base_backup_uuid"))
+        base_backup_uuid = parse<UUID>(getString(config_root, "base_backup_uuid"));
 
-    Poco::Util::AbstractConfiguration::Keys keys;
-    config->keys("contents", keys);
-    for (const auto & key : keys)
+    const auto * contents = config_root->getNodeByPath("contents");
+    for (const Poco::XML::Node * child = contents->firstChild(); child; child = child->nextSibling())
     {
-        if ((key == "file") || key.starts_with("file["))
+        if (child->nodeName() == "file")
        {
-            String prefix = "contents." + key + ".";
+            const Poco::XML::Node * file_config = child;
             FileInfo info;
-            info.file_name = config->getString(prefix + "name");
-            info.size = config->getUInt64(prefix + "size");
+            info.file_name = getString(file_config, "name");
+            info.size = getUInt64(file_config, "size");
             if (info.size)
             {
-                info.checksum = unhexChecksum(config->getString(prefix + "checksum"));
+                info.checksum = unhexChecksum(getString(file_config, "checksum"));
 
-                bool use_base = config->getBool(prefix + "use_base", false);
-                info.base_size = config->getUInt64(prefix + "base_size", use_base ? info.size : 0);
+                bool use_base = getBool(file_config, "use_base", false);
+                info.base_size = getUInt64(file_config, "base_size", use_base ? info.size : 0);
                 if (info.base_size)
                     use_base = true;
 
@@ -423,14 +427,14 @@ void BackupImpl::readBackupMetadata()
                 if (info.base_size == info.size)
                     info.base_checksum = info.checksum;
                 else
-                    info.base_checksum = unhexChecksum(config->getString(prefix + "base_checksum"));
+                    info.base_checksum = unhexChecksum(getString(file_config, "base_checksum"));
             }
 
             if (info.size > info.base_size)
             {
-                info.data_file_name = config->getString(prefix + "data_file", info.file_name);
-                info.archive_suffix = config->getString(prefix + "archive_suffix", "");
-                info.pos_in_archive = config->getUInt64(prefix + "pos_in_archive", static_cast<UInt64>(-1));
+                info.data_file_name = getString(file_config, "data_file", info.file_name);
+                info.archive_suffix = getString(file_config, "archive_suffix", "");
+                info.pos_in_archive = getUInt64(file_config, "pos_in_archive", static_cast<UInt64>(-1));
             }
         }
 
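The BackupImpl hunks above replace Poco::Util::XMLConfiguration with a direct Poco DOM parse plus the helpers from Common/XMLUtils.h (getRootNode, getInt, getString, getUInt64, getBool), and walk the <file> entries as DOM children instead of enumerating configuration keys. Below is a condensed sketch of that parsing pattern, restricted to the calls that appear in the diff; the metadata string and the handling of the parsed values are simplified placeholders:

#include <Poco/AutoPtr.h>
#include <Poco/DOM/Document.h>
#include <Poco/DOM/DOMParser.h>
#include <Common/XMLUtils.h>

using namespace DB;

void readBackupMetadataSketch(const String & str)
{
    using namespace XMLUtils;

    /// Parse the whole metadata document once and keep a pointer to its root element.
    Poco::XML::DOMParser dom_parser;
    Poco::AutoPtr<Poco::XML::Document> config = dom_parser.parseMemory(str.data(), str.size());
    const Poco::XML::Node * config_root = getRootNode(config);

    /// Scalar fields are read with the typed XMLUtils getters.
    int version = getInt(config_root, "version");
    String uuid_text = getString(config_root, "uuid");

    /// Optional nodes are probed with getNodeByPath() instead of AbstractConfiguration::has().
    bool has_base_backup = config_root->getNodeByPath("base_backup") != nullptr;

    /// <contents> is walked child by child; only <file> elements are of interest.
    const auto * contents = config_root->getNodeByPath("contents");
    for (const Poco::XML::Node * child = contents->firstChild(); child; child = child->nextSibling())
    {
        if (child->nodeName() != "file")
            continue;
        String file_name = getString(child, "name");
        UInt64 size = getUInt64(child, "size");
        bool use_base = getBool(child, "use_base", false);
        /// In the real code these values populate a FileInfo; here they are left unused.
    }
}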
@@ -96,6 +96,7 @@ RestorerFromBackup::RestorerFromBackup(
     , on_cluster_first_sync_timeout(context->getConfigRef().getUInt64("backups.on_cluster_first_sync_timeout", 180000))
     , create_table_timeout(context->getConfigRef().getUInt64("backups.create_table_timeout", 300000))
     , log(&Poco::Logger::get("RestorerFromBackup"))
+    , tables_dependencies("RestorerFromBackup")
 {
 }
 
@@ -133,6 +134,7 @@ RestorerFromBackup::DataRestoreTasks RestorerFromBackup::run(Mode mode)
 
     /// Create tables using the create queries read from the backup.
     setStage(Stage::CREATING_TABLES);
+    removeUnresolvedDependencies();
     createTables();
 
     /// All what's left is to insert data to tables.
@@ -341,10 +343,11 @@ void RestorerFromBackup::findTableInBackup(const QualifiedTableName & table_name
     TableInfo & res_table_info = table_infos[table_name];
     res_table_info.create_table_query = create_table_query;
     res_table_info.is_predefined_table = DatabaseCatalog::instance().isPredefinedTable(StorageID{table_name.database, table_name.table});
-    res_table_info.dependencies = getDependenciesSetFromCreateQuery(context->getGlobalContext(), table_name, create_table_query);
     res_table_info.has_data = backup->hasFiles(data_path_in_backup);
     res_table_info.data_path_in_backup = data_path_in_backup;
 
+    tables_dependencies.addDependencies(table_name, getDependenciesFromCreateQuery(context->getGlobalContext(), table_name, create_table_query));
+
     if (partitions)
     {
         if (!res_table_info.partitions)
@@ -622,21 +625,62 @@ void RestorerFromBackup::checkDatabase(const String & database_name)
     }
 }
 
+void RestorerFromBackup::removeUnresolvedDependencies()
+{
+    auto need_exclude_dependency = [this](const StorageID & table_id)
+    {
+        /// Table will be restored.
+        if (table_infos.contains(table_id.getQualifiedName()))
+            return false;
+
+        /// Table exists and it already exists
+        if (!DatabaseCatalog::instance().isTableExist(table_id, context))
+        {
+            LOG_WARNING(
+                log,
+                "Tables {} in backup depend on {}, but seems like {} is not in the backup and does not exist. "
+                "Will try to ignore that and restore tables",
+                fmt::join(tables_dependencies.getDependents(table_id), ", "),
+                table_id,
+                table_id);
+        }
+
+        size_t num_dependencies, num_dependents;
+        tables_dependencies.getNumberOfAdjacents(table_id, num_dependencies, num_dependents);
+        if (num_dependencies || !num_dependents)
+            throw Exception(
+                ErrorCodes::LOGICAL_ERROR,
+                "Table {} in backup doesn't have dependencies and dependent tables as it expected to. It's a bug",
+                table_id);
+
+        return true; /// Exclude this dependency.
+    };
+
+    tables_dependencies.removeTablesIf(need_exclude_dependency);
+
+    if (tables_dependencies.getNumberOfTables() != table_infos.size())
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Number of tables to be restored is not as expected. It's a bug");
+
+    if (tables_dependencies.hasCyclicDependencies())
+    {
+        LOG_WARNING(
+            log,
+            "Tables {} in backup have cyclic dependencies: {}. Will try to ignore that and restore tables",
+            fmt::join(tables_dependencies.getTablesWithCyclicDependencies(), ", "),
+            tables_dependencies.describeCyclicDependencies());
+    }
+}
+
 void RestorerFromBackup::createTables()
 {
-    while (true)
+    /// We need to create tables considering their dependencies.
+    auto tables_to_create = tables_dependencies.getTablesSortedByDependency();
+    for (const auto & table_id : tables_to_create)
     {
-        /// We need to create tables considering their dependencies.
-        auto tables_to_create = findTablesWithoutDependencies();
-        if (tables_to_create.empty())
-            break; /// We've already created all the tables.
-
-        for (const auto & table_name : tables_to_create)
-        {
-            createTable(table_name);
-            checkTable(table_name);
-            insertDataToTable(table_name);
-        }
+        auto table_name = table_id.getQualifiedName();
+        createTable(table_name);
+        checkTable(table_name);
+        insertDataToTable(table_name);
     }
 }
 
@@ -752,62 +796,6 @@ void RestorerFromBackup::insertDataToTable(const QualifiedTableName & table_name
     }
 }
 
-/// Returns the list of tables without dependencies or those which dependencies have been created before.
-std::vector<QualifiedTableName> RestorerFromBackup::findTablesWithoutDependencies() const
-{
-    std::vector<QualifiedTableName> tables_without_dependencies;
-    bool all_tables_created = true;
-
-    for (const auto & [key, table_info] : table_infos)
-    {
-        if (table_info.storage)
-            continue;
-
-        /// Found a table which is not created yet.
-        all_tables_created = false;
-
-        /// Check if all dependencies have been created before.
-        bool all_dependencies_met = true;
-        for (const auto & dependency : table_info.dependencies)
-        {
-            auto it = table_infos.find(dependency);
-            if ((it != table_infos.end()) && !it->second.storage)
-            {
-                all_dependencies_met = false;
-                break;
-            }
-        }
-
-        if (all_dependencies_met)
-            tables_without_dependencies.push_back(key);
-    }
-
-    if (!tables_without_dependencies.empty())
-        return tables_without_dependencies;
-
-    if (all_tables_created)
-        return {};
-
-    /// Cyclic dependency? We'll try to create those tables anyway but probably it's going to fail.
-    std::vector<QualifiedTableName> tables_with_cyclic_dependencies;
-    for (const auto & [key, table_info] : table_infos)
-    {
-        if (!table_info.storage)
-            tables_with_cyclic_dependencies.push_back(key);
-    }
-
-    /// Only show a warning here, proper exception will be thrown later on creating those tables.
-    LOG_WARNING(
-        log,
-        "Some tables have cyclic dependency from each other: {}",
-        boost::algorithm::join(
-            tables_with_cyclic_dependencies
-            | boost::adaptors::transformed([](const QualifiedTableName & table_name) -> String { return table_name.getFullName(); }),
-            ", "));
-
-    return tables_with_cyclic_dependencies;
-}
-
 void RestorerFromBackup::addDataRestoreTask(DataRestoreTask && new_task)
 {
     if (current_stage == Stage::INSERTING_DATA_TO_TABLES)
@@ -2,6 +2,7 @@
 
 #include <Backups/RestoreSettings.h>
 #include <Databases/DDLRenamingVisitor.h>
+#include <Databases/TablesDependencyGraph.h>
 #include <Parsers/ASTBackupQuery.h>
 #include <Storages/TableLockHolder.h>
 #include <Storages/IStorage_fwd.h>
@@ -94,6 +95,7 @@ private:
     void createDatabase(const String & database_name) const;
     void checkDatabase(const String & database_name);
 
+    void removeUnresolvedDependencies();
     void createTables();
     void createTable(const QualifiedTableName & table_name);
     void checkTable(const QualifiedTableName & table_name);
@@ -114,7 +116,6 @@ private:
     {
         ASTPtr create_table_query;
         bool is_predefined_table = false;
-        std::unordered_set<QualifiedTableName> dependencies;
         bool has_data = false;
         std::filesystem::path data_path_in_backup;
         std::optional<ASTs> partitions;
@@ -123,11 +124,10 @@ private:
         TableLockHolder table_lock;
     };
 
-    std::vector<QualifiedTableName> findTablesWithoutDependencies() const;
-
     String current_stage;
     std::unordered_map<String, DatabaseInfo> database_infos;
     std::map<QualifiedTableName, TableInfo> table_infos;
+    TablesDependencyGraph tables_dependencies;
     std::vector<DataRestoreTask> data_restore_tasks;
     std::unique_ptr<AccessRestorerFromBackup> access_restorer;
     bool access_restored = false;
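Taken together, the RestorerFromBackup hunks above swap the hand-rolled findTablesWithoutDependencies() loop for a TablesDependencyGraph member: dependencies are registered per table while the backup is scanned, unresolved entries are pruned in removeUnresolvedDependencies(), and createTables() simply walks the topologically sorted result. A condensed sketch of that flow, using only the calls visible in the diff; global_context, create_table_query and need_exclude_dependency stand in for the surrounding code:

// Condensed restore flow, mirroring the hunks above (not the full implementation).
TablesDependencyGraph tables_dependencies("RestorerFromBackup");

// 1. While scanning the backup, record each table's dependencies from its CREATE query.
tables_dependencies.addDependencies(table_name, getDependenciesFromCreateQuery(global_context, table_name, create_table_query));

// 2. Drop dependencies that are neither part of the restore nor present on the server.
tables_dependencies.removeTablesIf(need_exclude_dependency);

// 3. Create and fill tables in dependency order; cycles are only logged and handled best effort.
for (const auto & table_id : tables_dependencies.getTablesSortedByDependency())
{
    auto qualified_name = table_id.getQualifiedName();
    createTable(qualified_name);
    checkTable(qualified_name);
    insertDataToTable(qualified_name);
}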
@@ -236,7 +236,7 @@ int IBridge::main(const std::vector<std::string> & /*args*/)
     SensitiveDataMasker::setInstance(std::make_unique<SensitiveDataMasker>(config(), "query_masking_rules"));
 
     auto server = HTTPServer(
-        context,
+        std::make_shared<HTTPContext>(context),
         getHandlerFactoryPtr(context),
         server_pool,
         socket,
@@ -1671,6 +1671,11 @@ void ClientBase::processParsedSingleQuery(const String & full_query, const Strin
         std::cerr << progress_indication.elapsedSeconds() << "\n";
     }
 
+    if (!is_interactive && print_num_processed_rows)
+    {
+        std::cout << "Processed rows: " << processed_rows << "\n";
+    }
+
     if (have_error && report_error)
         processError(full_query);
 }
@@ -2368,6 +2373,7 @@ void ClientBase::init(int argc, char ** argv)
         ("hardware-utilization", "print hardware utilization information in progress bar")
         ("print-profile-events", po::value(&profile_events.print)->zero_tokens(), "Printing ProfileEvents packets")
         ("profile-events-delay-ms", po::value<UInt64>()->default_value(profile_events.delay_ms), "Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet)")
+        ("processed-rows", "print the number of locally processed rows")
 
         ("interactive", "Process queries-file or --query query and start interactive mode")
         ("pager", po::value<std::string>(), "Pipe all output into this command (less or similar)")
@@ -2446,6 +2452,8 @@ void ClientBase::init(int argc, char ** argv)
         config().setBool("print-profile-events", true);
     if (options.count("profile-events-delay-ms"))
         config().setUInt64("profile-events-delay-ms", options["profile-events-delay-ms"].as<UInt64>());
+    if (options.count("processed-rows"))
+        print_num_processed_rows = true;
     if (options.count("progress"))
     {
         switch (options["progress"].as<ProgressOption>())
@@ -253,6 +253,7 @@ protected:
     bool need_render_profile_events = true;
     bool written_first_block = false;
     size_t processed_rows = 0; /// How many rows have been read or written.
+    bool print_num_processed_rows = false; /// Whether to print the number of processed rows at
 
     bool print_stack_trace = false;
     /// The last exception that was received from the server. Is used for the
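The three client hunks above add a --processed-rows switch: ClientBase::init() registers the option and sets print_num_processed_rows, and processParsedSingleQuery() prints the counter after each query in non-interactive mode, so an invocation along the lines of clickhouse-client --processed-rows --query "SELECT ..." would end with a "Processed rows: N" line (the exact invocation is illustrative, not taken from the diff). The wiring reduces to the pattern below; options_description here is a stand-in for the client's boost::program_options description:

// Option registration (boost::program_options), as added in ClientBase::init():
options_description.add_options()
    ("processed-rows", "print the number of locally processed rows");

// Option handling: remember that the counter should be printed.
if (options.count("processed-rows"))
    print_num_processed_rows = true;

// After a query finishes in batch mode, report the locally processed row count.
if (!is_interactive && print_num_processed_rows)
    std::cout << "Processed rows: " << processed_rows << "\n";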
@@ -148,7 +148,8 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
         socket->setReceiveTimeout(timeouts.receive_timeout);
         socket->setSendTimeout(timeouts.send_timeout);
         socket->setNoDelay(true);
-        if (timeouts.tcp_keep_alive_timeout.totalSeconds())
+        int tcp_keep_alive_timeout_in_sec = timeouts.tcp_keep_alive_timeout.totalSeconds();
+        if (tcp_keep_alive_timeout_in_sec)
         {
             socket->setKeepAlive(true);
             socket->setOption(IPPROTO_TCP,
@@ -157,7 +158,7 @@ void Connection::connect(const ConnectionTimeouts & timeouts)
 #else
                 TCP_KEEPIDLE // __APPLE__
 #endif
-                , timeouts.tcp_keep_alive_timeout);
+                , tcp_keep_alive_timeout_in_sec);
         }
 
         in = std::make_shared<ReadBufferFromPocoSocket>(*socket);
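The Connection hunks above compute the keep-alive timeout in whole seconds once and pass that int to setOption(), rather than handing the Poco::Timespan straight to the TCP_KEEPIDLE / TCP_KEEPALIVE socket option. A minimal standalone sketch of the corrected call, Linux-flavoured (the function name is made up; on Apple platforms the option is TCP_KEEPALIVE, as the #if in the diff handles):

#include <Poco/Net/StreamSocket.h>
#include <Poco/Timespan.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

void enableTcpKeepAlive(Poco::Net::StreamSocket & socket, const Poco::Timespan & tcp_keep_alive_timeout)
{
    int tcp_keep_alive_timeout_in_sec = tcp_keep_alive_timeout.totalSeconds();
    if (tcp_keep_alive_timeout_in_sec)
    {
        socket.setKeepAlive(true);
        /// The kernel expects the idle time as a plain integer number of seconds.
        socket.setOption(IPPROTO_TCP, TCP_KEEPIDLE, tcp_keep_alive_timeout_in_sec);
    }
}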