Merge branch 'master' into stress_s3

alesapin committed 2022-08-29 11:25:28 +02:00
commit 133ca01447
1712 changed files with 7436 additions and 1536 deletions

View File

@@ -164,7 +164,6 @@ if (HAS_RESERVED_IDENTIFIER)
     add_compile_definitions (HAS_RESERVED_IDENTIFIER)
 endif ()
-# If turned `ON`, assumes the user has either the system GTest library or the bundled one.
 option(ENABLE_TESTS "Provide unit_test_dbms target with Google.Test unit tests" ON)
 option(ENABLE_EXAMPLES "Build all example programs in 'examples' subdirectories" OFF)
@@ -200,8 +199,8 @@ endif ()
 option(ADD_GDB_INDEX_FOR_GOLD "Add .gdb-index to resulting binaries for gold linker.")
 if (NOT CMAKE_BUILD_TYPE_UC STREQUAL "RELEASE")
-    # Can be lld or ld-lld.
-    if (LINKER_NAME MATCHES "lld$")
+    # Can be lld or ld-lld or lld-13 or /path/to/lld.
+    if (LINKER_NAME MATCHES "lld")
         set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--gdb-index")
         set (CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} -Wl,--gdb-index")
         message (STATUS "Adding .gdb-index via --gdb-index linker option.")
@@ -246,7 +245,8 @@ else ()
 endif ()
 # Create BuildID when using lld. For other linkers it is created by default.
-if (LINKER_NAME MATCHES "lld$")
+# (NOTE: LINKER_NAME can be either path or name, and in different variants)
+if (LINKER_NAME MATCHES "lld")
     # SHA1 is not cryptographically secure but it is the best what lld is offering.
     set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,--build-id=sha1")
 endif ()
@@ -368,8 +368,7 @@ endif()
 set (COMPILER_FLAGS "${COMPILER_FLAGS}")
-# Our built-in unwinder only supports DWARF version up to 4.
-set (DEBUG_INFO_FLAGS "-g -gdwarf-4")
+set (DEBUG_INFO_FLAGS "-g")
 set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${COMPILER_FLAGS}")
 set (CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} -O3 ${DEBUG_INFO_FLAGS} ${CMAKE_CXX_FLAGS_ADD}")
@@ -601,6 +600,7 @@ if (NATIVE_BUILD_TARGETS
         "-DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}"
         "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}"
         "-DENABLE_CCACHE=${ENABLE_CCACHE}"
+        "-DENABLE_CLICKHOUSE_SELF_EXTRACTING=${ENABLE_CLICKHOUSE_SELF_EXTRACTING}"
         ${CMAKE_SOURCE_DIR}
     WORKING_DIRECTORY "${NATIVE_BUILD_DIR}"
     COMMAND_ECHO STDOUT)
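
Editorial note on the linker hunks above: both `-Wl,--gdb-index` and `-Wl,--build-id=sha1` leave visible traces in the linked binary, so their effect can be spot-checked after a build. A minimal sketch, assuming binutils (`readelf`) is installed; the binary path is illustrative, not part of the diff:

```bash
#!/usr/bin/env bash
# Sketch: check a linked binary for the artifacts produced by
# --gdb-index and --build-id=sha1. Assumes readelf is available.
set -euo pipefail

BINARY=./clickhouse   # illustrative path

# --gdb-index emits a .gdb_index section, which speeds up gdb startup.
readelf -S "$BINARY" | grep -q '\.gdb_index' \
    && echo ".gdb_index present" \
    || echo ".gdb_index missing"

# --build-id=sha1 embeds a 20-byte SHA1 note; readelf -n prints it.
readelf -n "$BINARY" | grep 'Build ID' || echo "no build-id note"
```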

View File

@@ -7,12 +7,8 @@
 # How to install Ninja on Ubuntu:
 # sudo apt-get install ninja-build

-# CLion does not support Ninja
-# You can add your vote on CLion task tracker:
-# https://youtrack.jetbrains.com/issue/CPP-2659
-# https://youtrack.jetbrains.com/issue/CPP-870
-if (NOT DEFINED ENV{CLION_IDE} AND NOT DEFINED ENV{XCODE_IDE})
+if (NOT DEFINED ENV{XCODE_IDE})
     find_program(NINJA_PATH ninja)
     if (NINJA_PATH)
         set(CMAKE_GENERATOR "Ninja" CACHE INTERNAL "")
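
With the CLion guard dropped above, Ninja is now auto-selected whenever `find_program` locates it (only Xcode remains excluded). A rough command-line equivalent of that selection logic, assuming the checkout is the current directory and `build` is an out-of-source directory:

```bash
# Sketch: pick the Ninja generator when available, mirroring the
# cmake snippet above; otherwise let cmake use its default generator.
if command -v ninja >/dev/null 2>&1; then
    cmake -G Ninja -S . -B build
else
    cmake -S . -B build
fi
```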

View File

@@ -78,6 +78,7 @@ RUN export CODENAME="$(lsb_release --codename --short | tr 'A-Z' 'a-z')" \
     && apt-get update \
     && apt-get install \
         clang-15 \
+        llvm-15 \
         clang-tidy-15 \
         --yes --no-install-recommends \
     && apt-get clean

View File

@@ -26,7 +26,7 @@ ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 # set non-empty deb_location_url url to create a docker image
 # from debs created by CI build, for example:
-# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://clickhouse-builds.s3.yandex.net/21852/069cfbff388b3d478d1a16dc7060b48073f5d522/clickhouse_build_check/clang-11_relwithdebuginfo_none_bundled_unsplitted_disable_False_deb/" -t filimonovq/clickhouse-server:pr21852
+# docker build . --network host --build-arg version="21.4.1.6282" --build-arg deb_location_url="https://..." -t ...
 ARG deb_location_url=""

 # set non-empty single_binary_location_url to create docker image

View File

@@ -12,7 +12,7 @@ stage=${stage:-}
 script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
 echo "$script_dir"
 repo_dir=ch
-BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-14_debug_none_bundled_unsplitted_disable_False_binary"}
+BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-14_debug_none_unsplitted_disable_False_binary"}
 BINARY_URL_TO_DOWNLOAD=${BINARY_URL_TO_DOWNLOAD:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/$BINARY_TO_DOWNLOAD/clickhouse"}

 function clone

View File

@@ -2,7 +2,7 @@
 set -euo pipefail

-CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-14_relwithdebuginfo_none_bundled_unsplitted_disable_False_binary/clickhouse"}
+CLICKHOUSE_PACKAGE=${CLICKHOUSE_PACKAGE:="https://clickhouse-builds.s3.amazonaws.com/$PR_TO_TEST/$SHA_TO_TEST/clickhouse_build_check/clang-14_relwithdebuginfo_none_unsplitted_disable_False_binary/clickhouse"}
 CLICKHOUSE_REPO_PATH=${CLICKHOUSE_REPO_PATH:=""}
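
Both scripts above lean on the same shell idiom: `${VAR:=default}` assigns the default only when `VAR` is unset or empty, which lets CI override the build name through the environment. A self-contained sketch of the behavior:

```bash
# Sketch: the ${VAR:=default} idiom used for BINARY_TO_DOWNLOAD and
# CLICKHOUSE_PACKAGE above. The default applies only when the
# variable is unset or empty.
unset BINARY_TO_DOWNLOAD
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-14_debug_none_unsplitted_disable_False_binary"}
echo "$BINARY_TO_DOWNLOAD"    # -> the default build name

BINARY_TO_DOWNLOAD="my_local_build"
BINARY_TO_DOWNLOAD=${BINARY_TO_DOWNLOAD:="clang-14_debug_none_unsplitted_disable_False_binary"}
echo "$BINARY_TO_DOWNLOAD"    # -> my_local_build (default ignored)
```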

View File

@@ -88,13 +88,15 @@ sleep 5
 function run_tests()
 {
     set -x
-    # We can have several additional options so we path them as array because it's
-    # more idiologically correct.
+    # We can have several additional options so we pass them as array because it is more ideologically correct.
     read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS:-}"

+    HIGH_LEVEL_COVERAGE=YES
+
     # Use random order in flaky check
     if [ "$NUM_TRIES" -gt "1" ]; then
         ADDITIONAL_OPTIONS+=('--order=random')
+        HIGH_LEVEL_COVERAGE=NO
     fi

     if [[ -n "$USE_S3_STORAGE_FOR_MERGE_TREE" ]] && [[ "$USE_S3_STORAGE_FOR_MERGE_TREE" -eq 1 ]]; then
@@ -117,12 +119,17 @@ function run_tests()
         ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_NUM")
         ADDITIONAL_OPTIONS+=('--run-by-hash-total')
         ADDITIONAL_OPTIONS+=("$RUN_BY_HASH_TOTAL")
+        HIGH_LEVEL_COVERAGE=NO
     fi

     if [[ -n "$USE_DATABASE_ORDINARY" ]] && [[ "$USE_DATABASE_ORDINARY" -eq 1 ]]; then
         ADDITIONAL_OPTIONS+=('--db-engine=Ordinary')
     fi

+    if [[ "${HIGH_LEVEL_COVERAGE}" = "YES" ]]; then
+        ADDITIONAL_OPTIONS+=('--report-coverage')
+    fi
+
     set +e
     clickhouse-test --testname --shard --zookeeper --check-zookeeper-session --hung-check --print-time \
         --test-runs "$NUM_TRIES" "${ADDITIONAL_OPTIONS[@]}" 2>&1 \
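
A note on the mechanics above: `read -ra` splits the space-separated `ADDITIONAL_OPTIONS` environment variable into a bash array, and the quoted `"${ADDITIONAL_OPTIONS[@]}"` expansion later hands each option to `clickhouse-test` as a separate argument. In isolation the pattern looks like this (variable contents are illustrative):

```bash
# Sketch: the option-array pattern from run_tests() above.
ADDITIONAL_OPTIONS_INPUT="--jobs 8 --order=random"

# -r: no backslash mangling; -a: store the words in an array.
read -ra ADDITIONAL_OPTIONS <<< "${ADDITIONAL_OPTIONS_INPUT:-}"

# Conditional appends, as done for --report-coverage in the hunk.
ADDITIONAL_OPTIONS+=('--report-coverage')

# Quoted [@] expansion keeps each option a separate argument,
# unlike an unquoted scalar expansion.
printf '%s\n' "${ADDITIONAL_OPTIONS[@]}"
```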

View File

@@ -1,4 +1,5 @@
 #!/bin/bash
+# shellcheck disable=SC2024

 set -e -x -a -u
@@ -9,7 +10,7 @@ cd hadoop-3.3.1
 export JAVA_HOME=/usr
 mkdir -p target/test/data
 chown clickhouse ./target/test/data
-sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 &
+sudo -E -u clickhouse bin/mapred minicluster -format -nomr -nnport 12222 >> /test_output/garbage.log 2>&1 &

 while ! nc -z localhost 12222; do
     sleep 1
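
The `# shellcheck disable=SC2024` directive added at the top pairs with the new redirect on the `sudo` line: SC2024 warns that redirections run with the caller's privileges, not the target user's. That is intended here, since root launches the command and only the child process is demoted. A sketch of the distinction, with an illustrative command name and log path:

```bash
# The redirect is performed by the calling (root) shell, so the log
# file is created with the caller's privileges even though the command
# runs as clickhouse. This is exactly what trips ShellCheck SC2024.
sudo -E -u clickhouse some_command >> /tmp/example.log 2>&1 &

# To write the log *as* the target user instead, the redirect must
# happen inside the sudo'd shell:
sudo -u clickhouse sh -c 'some_command >> /tmp/example.log 2>&1' &
```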

View File

@@ -105,12 +105,16 @@ EOL

 function stop()
 {
+    local pid
+    # Preserve the pid, since the server can hang after the PID file is deleted.
+    pid="$(cat /var/run/clickhouse-server/clickhouse-server.pid)"
+
     clickhouse stop --do-not-kill && return
     # We failed to stop the server with SIGTERM. Maybe it hang, let's collect stacktraces.
     kill -TERM "$(pidof gdb)" ||:
     sleep 5
     echo "thread apply all backtrace (on stop)" >> /test_output/gdb.log
-    gdb -batch -ex 'thread apply all backtrace' -p "$(cat /var/run/clickhouse-server/clickhouse-server.pid)" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
+    gdb -batch -ex 'thread apply all backtrace' -p "$pid" | ts '%Y-%m-%d %H:%M:%S' >> /test_output/gdb.log
     clickhouse stop --force
 }
@@ -474,6 +478,13 @@ else
     [ -s /test_output/bc_check_fatal_messages.txt ] || rm /test_output/bc_check_fatal_messages.txt
 fi

+dmesg -T > /test_output/dmesg.log
+
+# OOM in dmesg -- those are real
+grep -q -F -e 'Out of memory: Killed process' -e 'oom_reaper: reaped process' -e 'oom-kill:constraint=CONSTRAINT_NONE' /test_output/dmesg.log \
+    && echo -e 'OOM in dmesg\tFAIL' >> /test_output/test_results.tsv \
+    || echo -e 'No OOM in dmesg\tOK' >> /test_output/test_results.tsv
+
 tar -chf /test_output/coordination.tar /var/lib/clickhouse/coordination ||:

 mv /var/log/clickhouse-server/stderr.log /test_output/
@@ -495,5 +506,3 @@ for core in core.*; do
     pigz $core
     mv $core.gz /test_output/
 done
-
-dmesg -T > /test_output/dmesg.log
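
Two defensive patterns are worth noting in this file's hunks: `stop()` now reads the PID before the first stop attempt (the PID file disappears on `clickhouse stop`, even when the process itself hangs), and the relocated OOM check greps `dmesg` for fixed kernel strings. The grep can be sanity-checked against a fabricated log line, for example:

```bash
# Sketch: exercise the OOM grep from the hunk above on a fake dmesg
# capture instead of the real /test_output/dmesg.log. The sample
# line below is fabricated for the test.
cat > /tmp/dmesg.sample <<'EOF'
[Mon Aug 29 11:00:00 2022] Out of memory: Killed process 12345 (clickhouse)
EOF

grep -q -F -e 'Out of memory: Killed process' \
           -e 'oom_reaper: reaped process' \
           -e 'oom-kill:constraint=CONSTRAINT_NONE' /tmp/dmesg.sample \
    && echo -e 'OOM in dmesg\tFAIL' \
    || echo -e 'No OOM in dmesg\tOK'
```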

View File

@@ -14,8 +14,6 @@ do
         || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz"
 done

-exit 0
-
 tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \
     || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz"
 sudo "clickhouse-common-static-$LATEST_VERSION/install/doinst.sh"
@@ -26,7 +24,7 @@ sudo "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh"
 tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \
     || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz"
-sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh"
+sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" configure
 sudo /etc/init.d/clickhouse-server start

 tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \

View File

@@ -1,10 +1,11 @@
 ---
+slug: /en/development/adding_test_queries
 sidebar_label: Adding Test Queries
 sidebar_position: 63
+title: How to add test queries to ClickHouse CI
 description: Instructions on how to add a test case to ClickHouse continuous integration
 ---

-# How to add test queries to ClickHouse CI
-
 ClickHouse has hundreds (or even thousands) of features. Every commit gets checked by a complex set of tests containing many thousands of test cases.

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/development/architecture
 sidebar_label: Architecture Overview
 sidebar_position: 62
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/development/browse-code
 sidebar_label: Source Code Browser
 sidebar_position: 72
 description: Various ways to browse and edit the source code

View File

@@ -1,10 +1,10 @@
 ---
+slug: /en/development/build-cross-arm
 sidebar_position: 67
+title: How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture
 sidebar_label: Build on Linux for AARCH64 (ARM64)
 ---

-# How to Build ClickHouse on Linux for AARCH64 (ARM64) Architecture
-
 If you use AArch64 machine and want to build ClickHouse for AArch64, build as usual.
 If you use x86_64 machine and want cross-compile for AArch64, add the following flag to `cmake`: `-DCMAKE_TOOLCHAIN_FILE=cmake/linux/toolchain-aarch64.cmake`

View File

@@ -1,9 +1,10 @@
 ---
+slug: /en/development/build-cross-osx
 sidebar_position: 66
+title: How to Build ClickHouse on Linux for Mac OS X
 sidebar_label: Build on Linux for Mac OS X
 ---

-# How to Build ClickHouse on Linux for Mac OS X
 This is for the case when you have a Linux machine and want to use it to build `clickhouse` binary that will run on OS X.
 This is intended for continuous integration checks that run on Linux servers. If you want to build ClickHouse directly on Mac OS X, then proceed with [another instruction](../development/build-osx.md).

View File

@@ -1,10 +1,10 @@
 ---
+slug: /en/development/build-cross-riscv
 sidebar_position: 68
+title: How to Build ClickHouse on Linux for RISC-V 64 Architecture
 sidebar_label: Build on Linux for RISC-V 64
 ---

-# How to Build ClickHouse on Linux for RISC-V 64 Architecture
-
 As of writing (11.11.2021) building for risc-v considered to be highly experimental. Not all features can be enabled.
 This is for the case when you have Linux machine and want to use it to build `clickhouse` binary that will run on another Linux machine with RISC-V 64 CPU architecture. This is intended for continuous integration checks that run on Linux servers.

View File

@@ -1,11 +1,11 @@
 ---
+slug: /en/development/build-osx
 sidebar_position: 65
 sidebar_label: Build on Mac OS X
+title: How to Build ClickHouse on Mac OS X
 description: How to build ClickHouse on Mac OS X
 ---

-# How to Build ClickHouse on Mac OS X
-
 :::info You don't have to build ClickHouse yourself!
 You can install pre-built ClickHouse as described in [Quick Start](https://clickhouse.com/#quick-start). Follow **macOS (Intel)** or **macOS (Apple silicon)** installation instructions.
 :::
::: :::

View File

@@ -1,10 +1,11 @@
 ---
+slug: /en/development/build
 sidebar_position: 64
 sidebar_label: Build on Linux
+title: How to Build ClickHouse on Linux
 description: How to build ClickHouse on Linux
 ---

-# How to Build ClickHouse on Linux
-
 Supported platforms:

View File

@@ -1,11 +1,11 @@
 ---
+slug: /en/development/continuous-integration
 sidebar_position: 62
 sidebar_label: Continuous Integration Checks
+title: Continuous Integration Checks
 description: When you submit a pull request, some automated checks are ran for your code by the ClickHouse continuous integration (CI) system
 ---

-# Continuous Integration Checks
-
 When you submit a pull request, some automated checks are ran for your code by
 the ClickHouse [continuous integration (CI) system](tests.md#test-automation).
 This happens after a repository maintainer (someone from ClickHouse team) has
@@ -54,7 +54,7 @@ the documentation is wrong. Go to the check report and look for `ERROR` and `WAR
 Check that the description of your pull request conforms to the template
 [PULL_REQUEST_TEMPLATE.md](https://github.com/ClickHouse/ClickHouse/blob/master/.github/PULL_REQUEST_TEMPLATE.md).
 You have to specify a changelog category for your change (e.g., Bug Fix), and
-write a user-readable message describing the change for [CHANGELOG.md](../whats-new/changelog/)
+write a user-readable message describing the change for [CHANGELOG.md](../whats-new/changelog/index.md)

 ## Push To DockerHub

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/development/contrib
 sidebar_position: 71
 sidebar_label: Third-Party Libraries
 description: A list of third-party libraries used

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/development/developer-instruction
 sidebar_position: 61
 sidebar_label: Getting Started
 description: Prerequisites and an overview of how to build ClickHouse
@@ -285,9 +286,4 @@ If you are not interested in functionality provided by third-party libraries, yo
     -DENABLE_LIBRARIES=0 -DENABLE_EMBEDDED_COMPILER=0
 ```

-Compressing the binary at the end of the build may take a while, disable the self-extraction feature via
-```
--DENABLE_CLICKHOUSE_SELF_EXTRACTING=0
-```
-
 In case of problems with any of the development options, you are on your own!

View File

@ -1,8 +0,0 @@
---
sidebar_label: Development
sidebar_position: 58
---
# ClickHouse Development
[Original article](https://clickhouse.com/docs/en/development/) <!--hide-->

View File

@ -1,3 +1,6 @@
---
slug: /en/development/integrating_rust_libraries
---
# Integrating Rust libraries # Integrating Rust libraries
Rust library integration will be described based on BLAKE3 hash-function integration. Rust library integration will be described based on BLAKE3 hash-function integration.

View File

@ -1,4 +1,5 @@
--- ---
slug: /en/development/style
sidebar_position: 69 sidebar_position: 69
sidebar_label: C++ Guide sidebar_label: C++ Guide
description: A list of recommendations regarding coding style, naming convention, formatting and more description: A list of recommendations regarding coding style, naming convention, formatting and more

View File

@ -1,11 +1,11 @@
--- ---
slug: /en/development/tests
sidebar_position: 70 sidebar_position: 70
sidebar_label: Testing sidebar_label: Testing
title: ClickHouse Testing
description: Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way. description: Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.
--- ---
# ClickHouse Testing
## Functional Tests ## Functional Tests
Functional tests are the most simple and convenient to use. Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way. Functional tests are the most simple and convenient to use. Most of ClickHouse features can be tested with functional tests and they are mandatory to use for every change in ClickHouse code that can be tested that way.

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/database-engines/atomic
 sidebar_label: Atomic
 sidebar_position: 10
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/database-engines/
 toc_folder_title: Database Engines
 toc_priority: 27
 toc_title: Introduction

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/database-engines/lazy
 sidebar_label: Lazy
 sidebar_position: 20
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/database-engines/materialized-mysql
 sidebar_label: MaterializedMySQL
 sidebar_position: 70
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/database-engines/materialized-postgresql
 sidebar_label: MaterializedPostgreSQL
 sidebar_position: 60
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/database-engines/mysql
 sidebar_position: 50
 sidebar_label: MySQL
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/database-engines/postgresql
 sidebar_position: 40
 sidebar_label: PostgreSQL
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/database-engines/replicated
 sidebar_position: 30
 sidebar_label: Replicated
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/database-engines/sqlite
 sidebar_position: 55
 sidebar_label: SQLite
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/
 toc_folder_title: Table Engines
 toc_priority: 26
 toc_title: Introduction

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/ExternalDistributed
 sidebar_position: 12
 sidebar_label: ExternalDistributed
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/embedded-rocksdb
 sidebar_position: 9
 sidebar_label: EmbeddedRocksDB
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/hdfs
 sidebar_position: 6
 sidebar_label: HDFS
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/hive
 sidebar_position: 4
 sidebar_label: Hive
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/
 sidebar_position: 40
 sidebar_label: Integrations
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/jdbc
 sidebar_position: 3
 sidebar_label: JDBC
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/kafka
 sidebar_position: 8
 sidebar_label: Kafka
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/materialized-postgresql
 sidebar_position: 12
 sidebar_label: MaterializedPostgreSQL
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/mongodb
 sidebar_position: 5
 sidebar_label: MongoDB
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/mysql
 sidebar_position: 4
 sidebar_label: MySQL
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/nats
 sidebar_position: 14
 sidebar_label: NATS
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/odbc
 sidebar_position: 2
 sidebar_label: ODBC
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/postgresql
 sidebar_position: 11
 sidebar_label: PostgreSQL
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/rabbitmq
 sidebar_position: 10
 sidebar_label: RabbitMQ
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/s3
 sidebar_position: 7
 sidebar_label: S3
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/integrations/sqlite
 sidebar_position: 7
 sidebar_label: SQLite
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/log-family/
 sidebar_position: 20
 sidebar_label: Log Family
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/log-family/log
 toc_priority: 33
 toc_title: Log
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/log-family/stripelog
 toc_priority: 32
 toc_title: StripeLog
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/log-family/tinylog
 toc_priority: 34
 toc_title: TinyLog
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/aggregatingmergetree
 sidebar_position: 60
 sidebar_label: AggregatingMergeTree
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/collapsingmergetree
 sidebar_position: 70
 sidebar_label: CollapsingMergeTree
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/custom-partitioning-key
 sidebar_position: 30
 sidebar_label: Custom Partitioning Key
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/graphitemergetree
 sidebar_position: 90
 sidebar_label: GraphiteMergeTree
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/
 sidebar_position: 10
 sidebar_label: MergeTree Family
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/mergetree
 sidebar_position: 11
 sidebar_label: MergeTree
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/replacingmergetree
 sidebar_position: 40
 sidebar_label: ReplacingMergeTree
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/replication
 sidebar_position: 20
 sidebar_label: Data Replication
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/summingmergetree
 sidebar_position: 50
 sidebar_label: SummingMergeTree
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/mergetree-family/versionedcollapsingmergetree
 sidebar_position: 80
 sidebar_label: VersionedCollapsingMergeTree
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/buffer
 sidebar_position: 120
 sidebar_label: Buffer
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/dictionary
 sidebar_position: 20
 sidebar_label: Dictionary
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/distributed
 sidebar_position: 10
 sidebar_label: Distributed
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/external-data
 sidebar_position: 130
 sidebar_label: External Data
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/file
 sidebar_position: 40
 sidebar_label: File
 ---

View File

@@ -1,10 +1,10 @@
 ---
+slug: /en/engines/table-engines/special/generate
 sidebar_position: 140
 sidebar_label: GenerateRandom
+title: "GenerateRandom Table Engine"
 ---

-# GenerateRandom Table Engine
-
 The GenerateRandom table engine produces random data for given table schema.

 Usage examples:

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/
 sidebar_position: 50
 sidebar_label: Special
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/join
 sidebar_position: 70
 sidebar_label: Join
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/materializedview
 sidebar_position: 100
 sidebar_label: MaterializedView
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/memory
 sidebar_position: 110
 sidebar_label: Memory
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/merge
 sidebar_position: 30
 sidebar_label: Merge
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/null
 sidebar_position: 50
 sidebar_label: 'Null'
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/set
 sidebar_position: 60
 sidebar_label: Set
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/url
 sidebar_position: 80
 sidebar_label: URL
 ---
@@ -13,6 +14,8 @@ Syntax: `URL(URL [,Format] [,CompressionMethod])`
 - The `Format` must be one that ClickHouse can use in `SELECT` queries and, if necessary, in `INSERTs`. For the full list of supported formats, see [Formats](../../../interfaces/formats.md#formats).

+If this argument is not specified, ClickHouse detects the format automatically from the suffix of the `URL` parameter. If the suffix of the `URL` parameter does not match any supported format, the table creation fails. For example, for the engine expression `URL('http://localhost/test.json')`, the `JSON` format is applied.
+
 - `CompressionMethod` indicates that whether the HTTP body should be compressed. If the compression is enabled, the HTTP packets sent by the URL engine contain 'Content-Encoding' header to indicate which compression method is used.

 To enable compression, please first make sure the remote HTTP endpoint indicated by the `URL` parameter supports corresponding compression algorithm.
@@ -27,6 +30,11 @@ The supported `CompressionMethod` should be one of following:
 - bz2
 - snappy
 - none
+- auto
+
+If `CompressionMethod` is not specified, it defaults to `auto`. This means ClickHouse detects the compression method from the suffix of the `URL` parameter automatically. If the suffix matches any of the compression methods listed above, the corresponding compression is applied; otherwise, no compression is enabled.
+
+For example, for the engine expression `URL('http://localhost/test.gzip')`, the `gzip` compression method is applied, but for `URL('http://localhost/test.fr')`, no compression is enabled because the suffix `fr` does not match any of the compression methods above.

 ## Usage {#using-the-engine-in-the-clickhouse-server}
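
Since the new paragraphs describe suffix-based auto-detection, a brief usage sketch may help. It assumes a local `clickhouse-client` and an HTTP endpoint actually serving the files; the URLs mirror the doc's own examples:

```bash
# Format inferred from the .json suffix, as described above, so the
# Format argument is omitted.
clickhouse-client --query "
    CREATE TABLE url_json_demo (id UInt32, value String)
    ENGINE = URL('http://localhost/test.json')"

# gzip compression inferred from the .gzip suffix; CompressionMethod
# defaults to 'auto'.
clickhouse-client --query "
    CREATE TABLE url_gzip_demo (id UInt32, value String)
    ENGINE = URL('http://localhost/test.gzip')"
```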

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/engines/table-engines/special/view
 sidebar_position: 90
 sidebar_label: View
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/amplab-benchmark
 sidebar_label: AMPLab Big Data Benchmark
 description: A benchmark dataset used for comparing the performance of data warehousing solutions.
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/brown-benchmark
 sidebar_label: Brown University Benchmark
 description: A new analytical benchmark for machine-generated log data
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/cell-towers
 sidebar_label: Cell Towers
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/criteo
 sidebar_label: Terabyte Click Logs from Criteo
 ---

View File

@@ -1,9 +1,9 @@
 ---
+slug: /en/getting-started/example-datasets/github-events
 sidebar_label: GitHub Events
+title: "GitHub Events Dataset"
 ---

-# GitHub Events Dataset
-
 Dataset contains all events on GitHub from 2011 to Dec 6 2020, the size is 3.1 billion records. Download size is 75 GB and it will require up to 200 GB space on disk if stored in a table with lz4 compression.

 Full dataset description, insights, download instruction and interactive queries are posted [here](https://ghe.clickhouse.tech/).

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/menus
 sidebar_label: New York Public Library "What's on the Menu?" Dataset
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/metrica
 sidebar_label: Web Analytics Data
 description: Dataset consisting of two tables containing anonymized web analytics data with hits and visits
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/nyc-taxi
 sidebar_label: New York Taxi Data
 sidebar_position: 2
 description: Data for billions of taxi and for-hire vehicle (Uber, Lyft, etc.) trips originating in New York City since 2009

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/ontime
 sidebar_label: OnTime Airline Flight Data
 description: Dataset containing the on-time performance of airline flights
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/opensky
 sidebar_label: Air Traffic Data
 description: The data in this dataset is derived and cleaned from the full OpenSky dataset to illustrate the development of air traffic during the COVID-19 pandemic.
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/recipes
 sidebar_label: Recipes Dataset
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/star-schema
 sidebar_label: Star Schema Benchmark
 description: "Dataset based on the TPC-H dbgen source. The coding style and architecture
 follows the TPCH dbgen."

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/uk-price-paid
 sidebar_label: UK Property Price Paid
 sidebar_position: 1
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/getting-started/example-datasets/wikistat
 sidebar_label: WikiStat
 ---

View File

@@ -140,8 +140,6 @@ do
         || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz"
 done

-exit 0
-
 tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \
     || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz"
 sudo "clickhouse-common-static-$LATEST_VERSION/install/doinst.sh"
@@ -152,7 +150,7 @@ sudo "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh"
 tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \
     || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz"
-sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh"
+sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" configure
 sudo /etc/init.d/clickhouse-server start

 tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/interfaces/cli
 sidebar_position: 17
 sidebar_label: Command-Line Client
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/interfaces/cpp
 sidebar_position: 24
 sidebar_label: C++ Client Library
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/interfaces/formats
 sidebar_position: 21
 sidebar_label: Input and Output Formats
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/interfaces/grpc
 sidebar_position: 19
 sidebar_label: gRPC Interface
 ---

View File

@@ -1,4 +1,5 @@
 ---
+slug: /en/interfaces/http
 sidebar_position: 19
 sidebar_label: HTTP Interface
 ---

Some files were not shown because too many files have changed in this diff.