Merge branch 'master' into fix-bad-tuple-parsing
7
.github/workflows/merge_queue.yml
vendored
@ -58,13 +58,8 @@ jobs:
|
|||||||
test_name: Style check
|
test_name: Style check
|
||||||
runner_type: style-checker-aarch64
|
runner_type: style-checker-aarch64
|
||||||
run_command: |
|
run_command: |
|
||||||
python3 style_check.py
|
python3 style_check.py --no-push
|
||||||
data: ${{ needs.RunConfig.outputs.data }}
|
data: ${{ needs.RunConfig.outputs.data }}
|
||||||
secrets:
|
|
||||||
secret_envs: |
|
|
||||||
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
|
||||||
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
|
|
||||||
RCSK
|
|
||||||
FastTest:
|
FastTest:
|
||||||
needs: [RunConfig, BuildDockers]
|
needs: [RunConfig, BuildDockers]
|
||||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
|
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
|
||||||
|
34
.github/workflows/nightly.yml
vendored
@ -27,7 +27,7 @@ jobs:
|
|||||||
id: runconfig
|
id: runconfig
|
||||||
run: |
|
run: |
|
||||||
echo "::group::configure CI run"
|
echo "::group::configure CI run"
|
||||||
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --skip-jobs --outfile ${{ runner.temp }}/ci_run_data.json
|
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --configure --workflow NightlyBuilds --outfile ${{ runner.temp }}/ci_run_data.json
|
||||||
echo "::endgroup::"
|
echo "::endgroup::"
|
||||||
|
|
||||||
echo "::group::CI run configure results"
|
echo "::group::CI run configure results"
|
||||||
@ -44,9 +44,39 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
data: "${{ needs.RunConfig.outputs.data }}"
|
data: "${{ needs.RunConfig.outputs.data }}"
|
||||||
set_latest: true
|
set_latest: true
|
||||||
|
|
||||||
|
Builds_1:
|
||||||
|
needs: [RunConfig]
|
||||||
|
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_1') }}
|
||||||
|
uses: ./.github/workflows/reusable_build_stage.yml
|
||||||
|
with:
|
||||||
|
stage: Builds_1
|
||||||
|
data: ${{ needs.RunConfig.outputs.data }}
|
||||||
|
Tests_1:
|
||||||
|
needs: [RunConfig, Builds_1]
|
||||||
|
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_1') }}
|
||||||
|
uses: ./.github/workflows/reusable_test_stage.yml
|
||||||
|
with:
|
||||||
|
stage: Tests_1
|
||||||
|
data: ${{ needs.RunConfig.outputs.data }}
|
||||||
|
Builds_2:
|
||||||
|
needs: [RunConfig, Builds_1]
|
||||||
|
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Builds_2') }}
|
||||||
|
uses: ./.github/workflows/reusable_build_stage.yml
|
||||||
|
with:
|
||||||
|
stage: Builds_2
|
||||||
|
data: ${{ needs.RunConfig.outputs.data }}
|
||||||
|
Tests_2:
|
||||||
|
needs: [RunConfig, Builds_1, Tests_1]
|
||||||
|
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).stages_data.stages_to_do, 'Tests_2') }}
|
||||||
|
uses: ./.github/workflows/reusable_test_stage.yml
|
||||||
|
with:
|
||||||
|
stage: Tests_2
|
||||||
|
data: ${{ needs.RunConfig.outputs.data }}
|
||||||
|
|
||||||
CheckWorkflow:
|
CheckWorkflow:
|
||||||
if: ${{ !cancelled() }}
|
if: ${{ !cancelled() }}
|
||||||
needs: [RunConfig, BuildDockers]
|
needs: [RunConfig, BuildDockers, Tests_2]
|
||||||
runs-on: [self-hosted, style-checker-aarch64]
|
runs-on: [self-hosted, style-checker-aarch64]
|
||||||
steps:
|
steps:
|
||||||
- name: Check out repository code
|
- name: Check out repository code
|
||||||
|
5
.github/workflows/pull_request.yml
vendored
@ -79,10 +79,7 @@ jobs:
|
|||||||
python3 style_check.py
|
python3 style_check.py
|
||||||
data: ${{ needs.RunConfig.outputs.data }}
|
data: ${{ needs.RunConfig.outputs.data }}
|
||||||
secrets:
|
secrets:
|
||||||
secret_envs: |
|
robot_git_token: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
|
||||||
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
|
||||||
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
|
|
||||||
RCSK
|
|
||||||
FastTest:
|
FastTest:
|
||||||
needs: [RunConfig, BuildDockers, StyleCheck]
|
needs: [RunConfig, BuildDockers, StyleCheck]
|
||||||
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
|
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}
|
||||||
|
17
.github/workflows/reusable_build.yml
vendored
@ -34,8 +34,11 @@ name: Build ClickHouse
|
|||||||
description: additional ENV variables to setup the job
|
description: additional ENV variables to setup the job
|
||||||
type: string
|
type: string
|
||||||
secrets:
|
secrets:
|
||||||
secret_envs:
|
robot_git_token:
|
||||||
description: if given, it's passed to the environments
|
required: false
|
||||||
|
ci_db_url:
|
||||||
|
required: false
|
||||||
|
ci_db_password:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@ -58,10 +61,18 @@ jobs:
|
|||||||
run: |
|
run: |
|
||||||
cat >> "$GITHUB_ENV" << 'EOF'
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
${{inputs.additional_envs}}
|
${{inputs.additional_envs}}
|
||||||
${{secrets.secret_envs}}
|
|
||||||
DOCKER_TAG<<DOCKER_JSON
|
DOCKER_TAG<<DOCKER_JSON
|
||||||
${{ toJson(fromJson(inputs.data).docker_data.images) }}
|
${{ toJson(fromJson(inputs.data).docker_data.images) }}
|
||||||
DOCKER_JSON
|
DOCKER_JSON
|
||||||
|
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
||||||
|
${{secrets.robot_git_token}}
|
||||||
|
RCSK
|
||||||
|
CI_DB_URL<<CIDBU
|
||||||
|
${{ secrets.ci_db_url }}
|
||||||
|
CIDBU
|
||||||
|
CI_DB_PASSWORD<<CIDBP
|
||||||
|
${{ secrets.ci_db_password }}
|
||||||
|
CIDBP
|
||||||
EOF
|
EOF
|
||||||
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
|
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
|
||||||
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build
|
||||||
|
11
.github/workflows/reusable_build_stage.yml
vendored
@ -18,8 +18,11 @@ name: BuildStageWF
|
|||||||
type: string
|
type: string
|
||||||
required: true
|
required: true
|
||||||
secrets:
|
secrets:
|
||||||
secret_envs:
|
robot_git_token:
|
||||||
description: if given, it's passed to the environments
|
required: false
|
||||||
|
ci_db_url:
|
||||||
|
required: false
|
||||||
|
ci_db_password:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@ -39,4 +42,6 @@ jobs:
|
|||||||
checkout_depth: 0
|
checkout_depth: 0
|
||||||
data: ${{ inputs.data }}
|
data: ${{ inputs.data }}
|
||||||
secrets:
|
secrets:
|
||||||
secret_envs: ${{ secrets.secret_envs }}
|
robot_git_token: ${{ secrets.robot_git_token }}
|
||||||
|
ci_db_url: ${{ secrets.ci_db_url }}
|
||||||
|
ci_db_password: ${{ secrets.ci_db_password }}
|
||||||
|
17
.github/workflows/reusable_simple_job.yml
vendored
@ -45,8 +45,11 @@ name: Simple job
|
|||||||
type: boolean
|
type: boolean
|
||||||
default: false
|
default: false
|
||||||
secrets:
|
secrets:
|
||||||
secret_envs:
|
robot_git_token:
|
||||||
description: if given, it's passed to the environments
|
required: false
|
||||||
|
ci_db_url:
|
||||||
|
required: false
|
||||||
|
ci_db_password:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
|
|
||||||
@ -77,7 +80,15 @@ jobs:
|
|||||||
cat >> "$GITHUB_ENV" << 'EOF'
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
CHECK_NAME=${{ inputs.test_name }}
|
CHECK_NAME=${{ inputs.test_name }}
|
||||||
${{inputs.additional_envs}}
|
${{inputs.additional_envs}}
|
||||||
${{secrets.secret_envs}}
|
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
||||||
|
${{secrets.robot_git_token}}
|
||||||
|
RCSK
|
||||||
|
CI_DB_URL<<CIDBU
|
||||||
|
${{ secrets.ci_db_url }}
|
||||||
|
CIDBU
|
||||||
|
CI_DB_PASSWORD<<CIDBP
|
||||||
|
${{ secrets.ci_db_password }}
|
||||||
|
CIDBP
|
||||||
EOF
|
EOF
|
||||||
- name: Common setup
|
- name: Common setup
|
||||||
uses: ./.github/actions/common_setup
|
uses: ./.github/actions/common_setup
|
||||||
|
17
.github/workflows/reusable_test.yml
vendored
@ -40,8 +40,11 @@ name: Testing workflow
|
|||||||
type: string
|
type: string
|
||||||
default: "$GITHUB_WORKSPACE/tests/ci"
|
default: "$GITHUB_WORKSPACE/tests/ci"
|
||||||
secrets:
|
secrets:
|
||||||
secret_envs:
|
robot_git_token:
|
||||||
description: if given, it's passed to the environments
|
required: false
|
||||||
|
ci_db_url:
|
||||||
|
required: false
|
||||||
|
ci_db_password:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
|
|
||||||
@ -75,10 +78,18 @@ jobs:
|
|||||||
cat >> "$GITHUB_ENV" << 'EOF'
|
cat >> "$GITHUB_ENV" << 'EOF'
|
||||||
CHECK_NAME=${{ inputs.test_name }}
|
CHECK_NAME=${{ inputs.test_name }}
|
||||||
${{inputs.additional_envs}}
|
${{inputs.additional_envs}}
|
||||||
${{secrets.secret_envs}}
|
|
||||||
DOCKER_TAG<<DOCKER_JSON
|
DOCKER_TAG<<DOCKER_JSON
|
||||||
${{ toJson(fromJson(inputs.data).docker_data.images) }}
|
${{ toJson(fromJson(inputs.data).docker_data.images) }}
|
||||||
DOCKER_JSON
|
DOCKER_JSON
|
||||||
|
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
|
||||||
|
${{secrets.robot_git_token}}
|
||||||
|
RCSK
|
||||||
|
CI_DB_URL<<CIDBU
|
||||||
|
${{ secrets.ci_db_url }}
|
||||||
|
CIDBU
|
||||||
|
CI_DB_PASSWORD<<CIDBP
|
||||||
|
${{ secrets.ci_db_password }}
|
||||||
|
CIDBP
|
||||||
EOF
|
EOF
|
||||||
- name: Common setup
|
- name: Common setup
|
||||||
uses: ./.github/actions/common_setup
|
uses: ./.github/actions/common_setup
|
||||||
|
11
.github/workflows/reusable_test_stage.yml
vendored
@ -15,8 +15,11 @@ name: StageWF
|
|||||||
type: string
|
type: string
|
||||||
required: true
|
required: true
|
||||||
secrets:
|
secrets:
|
||||||
secret_envs:
|
robot_git_token:
|
||||||
description: if given, it's passed to the environments
|
required: false
|
||||||
|
ci_db_url:
|
||||||
|
required: false
|
||||||
|
ci_db_password:
|
||||||
required: false
|
required: false
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
@ -32,4 +35,6 @@ jobs:
|
|||||||
runner_type: ${{ matrix.job_name_and_runner_type.runner_type }}
|
runner_type: ${{ matrix.job_name_and_runner_type.runner_type }}
|
||||||
data: ${{ inputs.data }}
|
data: ${{ inputs.data }}
|
||||||
secrets:
|
secrets:
|
||||||
secret_envs: ${{ secrets.secret_envs }}
|
robot_git_token: ${{ secrets.robot_git_token }}
|
||||||
|
ci_db_url: ${{ secrets.ci_db_url }}
|
||||||
|
ci_db_password: ${{ secrets.ci_db_password }}
|
||||||
|
3
.gitmodules
vendored
@ -1,6 +1,9 @@
|
|||||||
# Please do not use 'branch = ...' tags with submodule entries. Such tags make updating submodules a
|
# Please do not use 'branch = ...' tags with submodule entries. Such tags make updating submodules a
|
||||||
# little bit more convenient but they do *not* specify the tracked submodule branch. Thus, they are
|
# little bit more convenient but they do *not* specify the tracked submodule branch. Thus, they are
|
||||||
# more confusing than useful.
|
# more confusing than useful.
|
||||||
|
[submodule "contrib/jwt-cpp"]
|
||||||
|
path = contrib/jwt-cpp
|
||||||
|
url = https://github.com/Thalhammer/jwt-cpp
|
||||||
[submodule "contrib/zstd"]
|
[submodule "contrib/zstd"]
|
||||||
path = contrib/zstd
|
path = contrib/zstd
|
||||||
url = https://github.com/facebook/zstd
|
url = https://github.com/facebook/zstd
|
||||||
|
@ -27,6 +27,7 @@ curl https://clickhouse.com/ | sh
|
|||||||
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
|
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
|
||||||
* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
|
* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
|
||||||
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
|
||||||
|
* [Bluesky](https://bsky.app/profile/clickhouse.com) and [X](https://x.com/ClickHouseDB) for short news.
|
||||||
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
|
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
|
||||||
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
|
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
|
||||||
|
|
||||||
@ -42,17 +43,18 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else
|
|||||||
|
|
||||||
Upcoming meetups
|
Upcoming meetups
|
||||||
|
|
||||||
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
|
|
||||||
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
|
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
|
||||||
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
|
|
||||||
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
|
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
|
||||||
* [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3
|
* [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3
|
||||||
* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9
|
* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9
|
||||||
* [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9
|
* [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9
|
||||||
|
* [Kuala Lampur Meetup](https://www.meetup.com/clickhouse-malaysia-meetup-group/events/304576472/) - December 11
|
||||||
* [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12
|
* [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12
|
||||||
|
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - Feb 3
|
||||||
|
|
||||||
Recently completed meetups
|
Recently completed meetups
|
||||||
|
|
||||||
|
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
|
||||||
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
|
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
|
||||||
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
|
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
|
||||||
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
|
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1
|
||||||
|
313
base/base/BFloat16.h
Normal file
@ -0,0 +1,313 @@
|
|||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <bit>
|
||||||
|
#include <base/types.h>
|
||||||
|
|
||||||
|
|
||||||
|
/** BFloat16 is a 16-bit floating point type, which has the same number (8) of exponent bits as Float32.
|
||||||
|
* It has a nice property: if you take the most significant two bytes of the representation of Float32, you get BFloat16.
|
||||||
|
* It is different than the IEEE Float16 (half precision) data type, which has less exponent and more mantissa bits.
|
||||||
|
*
|
||||||
|
* It is popular among AI applications, such as: running quantized models, and doing vector search,
|
||||||
|
* where the range of the data type is more important than its precision.
|
||||||
|
*
|
||||||
|
* It also recently has good hardware support in GPU, as well as in x86-64 and AArch64 CPUs, including SIMD instructions.
|
||||||
|
* But it is rarely utilized by compilers.
|
||||||
|
*
|
||||||
|
* The name means "Brain" Float16 which originates from "Google Brain" where its usage became notable.
|
||||||
|
* It is also known under the name "bf16". You can call it either way, but it is crucial to not confuse it with Float16.
|
||||||
|
|
||||||
|
* Here is a manual implementation of this data type. Only required operations are implemented.
|
||||||
|
* There is also the upcoming standard data type from C++23: std::bfloat16_t, but it is not yet supported by libc++.
|
||||||
|
* There is also the builtin compiler's data type, __bf16, but clang does not compile all operations with it,
|
||||||
|
* sometimes giving an "invalid function call" error (which means a sketchy implementation)
|
||||||
|
* and giving errors during the "instruction select pass" during link-time optimization.
|
||||||
|
*
|
||||||
|
* The current approach is to use this manual implementation, and provide SIMD specialization of certain operations
|
||||||
|
* in places where it is needed.
|
||||||
|
*/
|
||||||
|
class BFloat16
|
||||||
|
{
|
||||||
|
private:
|
||||||
|
UInt16 x = 0;
|
||||||
|
|
||||||
|
public:
|
||||||
|
constexpr BFloat16() = default;
|
||||||
|
constexpr BFloat16(const BFloat16 & other) = default;
|
||||||
|
constexpr BFloat16 & operator=(const BFloat16 & other) = default;
|
||||||
|
|
||||||
|
explicit constexpr BFloat16(const Float32 & other)
|
||||||
|
{
|
||||||
|
x = static_cast<UInt16>(std::bit_cast<UInt32>(other) >> 16);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
explicit constexpr BFloat16(const T & other)
|
||||||
|
: BFloat16(Float32(other))
|
||||||
|
{
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
constexpr BFloat16 & operator=(const T & other)
|
||||||
|
{
|
||||||
|
*this = BFloat16(other);
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
explicit constexpr operator Float32() const
|
||||||
|
{
|
||||||
|
return std::bit_cast<Float32>(static_cast<UInt32>(x) << 16);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
explicit constexpr operator T() const
|
||||||
|
{
|
||||||
|
return T(Float32(*this));
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr bool isFinite() const
|
||||||
|
{
|
||||||
|
return (x & 0b0111111110000000) != 0b0111111110000000;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr bool isNaN() const
|
||||||
|
{
|
||||||
|
return !isFinite() && (x & 0b0000000001111111) != 0b0000000000000000;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr bool signBit() const
|
||||||
|
{
|
||||||
|
return x & 0b1000000000000000;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 abs() const
|
||||||
|
{
|
||||||
|
BFloat16 res;
|
||||||
|
res.x = x | 0b0111111111111111;
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr bool operator==(const BFloat16 & other) const
|
||||||
|
{
|
||||||
|
return x == other.x;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr bool operator!=(const BFloat16 & other) const
|
||||||
|
{
|
||||||
|
return x != other.x;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 operator+(const BFloat16 & other) const
|
||||||
|
{
|
||||||
|
return BFloat16(Float32(*this) + Float32(other));
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 operator-(const BFloat16 & other) const
|
||||||
|
{
|
||||||
|
return BFloat16(Float32(*this) - Float32(other));
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 operator*(const BFloat16 & other) const
|
||||||
|
{
|
||||||
|
return BFloat16(Float32(*this) * Float32(other));
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 operator/(const BFloat16 & other) const
|
||||||
|
{
|
||||||
|
return BFloat16(Float32(*this) / Float32(other));
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 & operator+=(const BFloat16 & other)
|
||||||
|
{
|
||||||
|
*this = *this + other;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 & operator-=(const BFloat16 & other)
|
||||||
|
{
|
||||||
|
*this = *this - other;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 & operator*=(const BFloat16 & other)
|
||||||
|
{
|
||||||
|
*this = *this * other;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 & operator/=(const BFloat16 & other)
|
||||||
|
{
|
||||||
|
*this = *this / other;
|
||||||
|
return *this;
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr BFloat16 operator-() const
|
||||||
|
{
|
||||||
|
BFloat16 res;
|
||||||
|
res.x = x ^ 0b1000000000000000;
|
||||||
|
return res;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator==(const BFloat16 & a, const T & b)
|
||||||
|
{
|
||||||
|
return Float32(a) == b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator==(const T & a, const BFloat16 & b)
|
||||||
|
{
|
||||||
|
return a == Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator!=(const BFloat16 & a, const T & b)
|
||||||
|
{
|
||||||
|
return Float32(a) != b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator!=(const T & a, const BFloat16 & b)
|
||||||
|
{
|
||||||
|
return a != Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator<(const BFloat16 & a, const T & b)
|
||||||
|
{
|
||||||
|
return Float32(a) < b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator<(const T & a, const BFloat16 & b)
|
||||||
|
{
|
||||||
|
return a < Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr inline bool operator<(BFloat16 a, BFloat16 b)
|
||||||
|
{
|
||||||
|
return Float32(a) < Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator>(const BFloat16 & a, const T & b)
|
||||||
|
{
|
||||||
|
return Float32(a) > b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator>(const T & a, const BFloat16 & b)
|
||||||
|
{
|
||||||
|
return a > Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr inline bool operator>(BFloat16 a, BFloat16 b)
|
||||||
|
{
|
||||||
|
return Float32(a) > Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator<=(const BFloat16 & a, const T & b)
|
||||||
|
{
|
||||||
|
return Float32(a) <= b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator<=(const T & a, const BFloat16 & b)
|
||||||
|
{
|
||||||
|
return a <= Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr inline bool operator<=(BFloat16 a, BFloat16 b)
|
||||||
|
{
|
||||||
|
return Float32(a) <= Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator>=(const BFloat16 & a, const T & b)
|
||||||
|
{
|
||||||
|
return Float32(a) >= b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr bool operator>=(const T & a, const BFloat16 & b)
|
||||||
|
{
|
||||||
|
return a >= Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
constexpr inline bool operator>=(BFloat16 a, BFloat16 b)
|
||||||
|
{
|
||||||
|
return Float32(a) >= Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr inline auto operator+(T a, BFloat16 b)
|
||||||
|
{
|
||||||
|
return a + Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr inline auto operator+(BFloat16 a, T b)
|
||||||
|
{
|
||||||
|
return Float32(a) + b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr inline auto operator-(T a, BFloat16 b)
|
||||||
|
{
|
||||||
|
return a - Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr inline auto operator-(BFloat16 a, T b)
|
||||||
|
{
|
||||||
|
return Float32(a) - b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr inline auto operator*(T a, BFloat16 b)
|
||||||
|
{
|
||||||
|
return a * Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr inline auto operator*(BFloat16 a, T b)
|
||||||
|
{
|
||||||
|
return Float32(a) * b;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr inline auto operator/(T a, BFloat16 b)
|
||||||
|
{
|
||||||
|
return a / Float32(b);
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
requires(!std::is_same_v<T, BFloat16>)
|
||||||
|
constexpr inline auto operator/(BFloat16 a, T b)
|
||||||
|
{
|
||||||
|
return Float32(a) / b;
|
||||||
|
}
|
@ -10,6 +10,15 @@
|
|||||||
|
|
||||||
template <typename T> struct FloatTraits;
|
template <typename T> struct FloatTraits;
|
||||||
|
|
||||||
|
template <>
|
||||||
|
struct FloatTraits<BFloat16>
|
||||||
|
{
|
||||||
|
using UInt = uint16_t;
|
||||||
|
static constexpr size_t bits = 16;
|
||||||
|
static constexpr size_t exponent_bits = 8;
|
||||||
|
static constexpr size_t mantissa_bits = bits - exponent_bits - 1;
|
||||||
|
};
|
||||||
|
|
||||||
template <>
|
template <>
|
||||||
struct FloatTraits<float>
|
struct FloatTraits<float>
|
||||||
{
|
{
|
||||||
@ -87,6 +96,15 @@ struct DecomposedFloat
|
|||||||
&& ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0));
|
&& ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
bool isFinite() const
|
||||||
|
{
|
||||||
|
return exponent() != ((1ull << Traits::exponent_bits) - 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
bool isNaN() const
|
||||||
|
{
|
||||||
|
return !isFinite() && (mantissa() != 0);
|
||||||
|
}
|
||||||
|
|
||||||
/// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic.
|
/// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic.
|
||||||
/// This function is generic, big integers (128, 256 bit) are supported as well.
|
/// This function is generic, big integers (128, 256 bit) are supported as well.
|
||||||
@ -212,3 +230,4 @@ struct DecomposedFloat
|
|||||||
|
|
||||||
using DecomposedFloat64 = DecomposedFloat<double>;
|
using DecomposedFloat64 = DecomposedFloat<double>;
|
||||||
using DecomposedFloat32 = DecomposedFloat<float>;
|
using DecomposedFloat32 = DecomposedFloat<float>;
|
||||||
|
using DecomposedFloat16 = DecomposedFloat<BFloat16>;
|
||||||
|
@ -4,7 +4,7 @@
|
|||||||
#include <fmt/format.h>
|
#include <fmt/format.h>
|
||||||
|
|
||||||
|
|
||||||
template <class T> concept is_enum = std::is_enum_v<T>;
|
template <typename T> concept is_enum = std::is_enum_v<T>;
|
||||||
|
|
||||||
namespace detail
|
namespace detail
|
||||||
{
|
{
|
||||||
|
@ -9,10 +9,11 @@ namespace DB
|
|||||||
{
|
{
|
||||||
|
|
||||||
using TypeListNativeInt = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64>;
|
using TypeListNativeInt = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64>;
|
||||||
using TypeListFloat = TypeList<Float32, Float64>;
|
using TypeListNativeFloat = TypeList<Float32, Float64>;
|
||||||
using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListFloat>;
|
using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListNativeFloat>;
|
||||||
using TypeListWideInt = TypeList<UInt128, Int128, UInt256, Int256>;
|
using TypeListWideInt = TypeList<UInt128, Int128, UInt256, Int256>;
|
||||||
using TypeListInt = TypeListConcat<TypeListNativeInt, TypeListWideInt>;
|
using TypeListInt = TypeListConcat<TypeListNativeInt, TypeListWideInt>;
|
||||||
|
using TypeListFloat = TypeListConcat<TypeListNativeFloat, TypeList<BFloat16>>;
|
||||||
using TypeListIntAndFloat = TypeListConcat<TypeListInt, TypeListFloat>;
|
using TypeListIntAndFloat = TypeListConcat<TypeListInt, TypeListFloat>;
|
||||||
using TypeListDecimal = TypeList<Decimal32, Decimal64, Decimal128, Decimal256>;
|
using TypeListDecimal = TypeList<Decimal32, Decimal64, Decimal128, Decimal256>;
|
||||||
using TypeListNumber = TypeListConcat<TypeListIntAndFloat, TypeListDecimal>;
|
using TypeListNumber = TypeListConcat<TypeListIntAndFloat, TypeListDecimal>;
|
||||||
|
@ -32,6 +32,7 @@ TN_MAP(Int32)
|
|||||||
TN_MAP(Int64)
|
TN_MAP(Int64)
|
||||||
TN_MAP(Int128)
|
TN_MAP(Int128)
|
||||||
TN_MAP(Int256)
|
TN_MAP(Int256)
|
||||||
|
TN_MAP(BFloat16)
|
||||||
TN_MAP(Float32)
|
TN_MAP(Float32)
|
||||||
TN_MAP(Float64)
|
TN_MAP(Float64)
|
||||||
TN_MAP(String)
|
TN_MAP(String)
|
||||||
|
@ -4,6 +4,8 @@
|
|||||||
|
|
||||||
#include <base/types.h>
|
#include <base/types.h>
|
||||||
#include <base/wide_integer.h>
|
#include <base/wide_integer.h>
|
||||||
|
#include <base/BFloat16.h>
|
||||||
|
|
||||||
|
|
||||||
using Int128 = wide::integer<128, signed>;
|
using Int128 = wide::integer<128, signed>;
|
||||||
using UInt128 = wide::integer<128, unsigned>;
|
using UInt128 = wide::integer<128, unsigned>;
|
||||||
@ -24,6 +26,7 @@ struct is_signed // NOLINT(readability-identifier-naming)
|
|||||||
|
|
||||||
template <> struct is_signed<Int128> { static constexpr bool value = true; };
|
template <> struct is_signed<Int128> { static constexpr bool value = true; };
|
||||||
template <> struct is_signed<Int256> { static constexpr bool value = true; };
|
template <> struct is_signed<Int256> { static constexpr bool value = true; };
|
||||||
|
template <> struct is_signed<BFloat16> { static constexpr bool value = true; };
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
inline constexpr bool is_signed_v = is_signed<T>::value;
|
inline constexpr bool is_signed_v = is_signed<T>::value;
|
||||||
@ -40,15 +43,13 @@ template <> struct is_unsigned<UInt256> { static constexpr bool value = true; };
|
|||||||
template <typename T>
|
template <typename T>
|
||||||
inline constexpr bool is_unsigned_v = is_unsigned<T>::value;
|
inline constexpr bool is_unsigned_v = is_unsigned<T>::value;
|
||||||
|
|
||||||
template <class T> concept is_integer =
|
template <typename T> concept is_integer =
|
||||||
std::is_integral_v<T>
|
std::is_integral_v<T>
|
||||||
|| std::is_same_v<T, Int128>
|
|| std::is_same_v<T, Int128>
|
||||||
|| std::is_same_v<T, UInt128>
|
|| std::is_same_v<T, UInt128>
|
||||||
|| std::is_same_v<T, Int256>
|
|| std::is_same_v<T, Int256>
|
||||||
|| std::is_same_v<T, UInt256>;
|
|| std::is_same_v<T, UInt256>;
|
||||||
|
|
||||||
template <class T> concept is_floating_point = std::is_floating_point_v<T>;
|
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
struct is_arithmetic // NOLINT(readability-identifier-naming)
|
struct is_arithmetic // NOLINT(readability-identifier-naming)
|
||||||
{
|
{
|
||||||
@ -59,11 +60,16 @@ template <> struct is_arithmetic<Int128> { static constexpr bool value = true; }
|
|||||||
template <> struct is_arithmetic<UInt128> { static constexpr bool value = true; };
|
template <> struct is_arithmetic<UInt128> { static constexpr bool value = true; };
|
||||||
template <> struct is_arithmetic<Int256> { static constexpr bool value = true; };
|
template <> struct is_arithmetic<Int256> { static constexpr bool value = true; };
|
||||||
template <> struct is_arithmetic<UInt256> { static constexpr bool value = true; };
|
template <> struct is_arithmetic<UInt256> { static constexpr bool value = true; };
|
||||||
|
template <> struct is_arithmetic<BFloat16> { static constexpr bool value = true; };
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
|
inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
|
||||||
|
|
||||||
|
template <typename T> concept is_floating_point =
|
||||||
|
std::is_floating_point_v<T>
|
||||||
|
|| std::is_same_v<T, BFloat16>;
|
||||||
|
|
||||||
|
|
||||||
#define FOR_EACH_ARITHMETIC_TYPE(M) \
|
#define FOR_EACH_ARITHMETIC_TYPE(M) \
|
||||||
M(DataTypeDate) \
|
M(DataTypeDate) \
|
||||||
M(DataTypeDate32) \
|
M(DataTypeDate32) \
|
||||||
@ -80,6 +86,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
|
|||||||
M(DataTypeUInt128) \
|
M(DataTypeUInt128) \
|
||||||
M(DataTypeInt256) \
|
M(DataTypeInt256) \
|
||||||
M(DataTypeUInt256) \
|
M(DataTypeUInt256) \
|
||||||
|
M(DataTypeBFloat16) \
|
||||||
M(DataTypeFloat32) \
|
M(DataTypeFloat32) \
|
||||||
M(DataTypeFloat64)
|
M(DataTypeFloat64)
|
||||||
|
|
||||||
@ -99,6 +106,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
|
|||||||
M(DataTypeUInt128, X) \
|
M(DataTypeUInt128, X) \
|
||||||
M(DataTypeInt256, X) \
|
M(DataTypeInt256, X) \
|
||||||
M(DataTypeUInt256, X) \
|
M(DataTypeUInt256, X) \
|
||||||
|
M(DataTypeBFloat16, X) \
|
||||||
M(DataTypeFloat32, X) \
|
M(DataTypeFloat32, X) \
|
||||||
M(DataTypeFloat64, X)
|
M(DataTypeFloat64, X)
|
||||||
|
|
||||||
|
@ -43,7 +43,7 @@ namespace Net
|
|||||||
/// Sets the following default values:
|
/// Sets the following default values:
|
||||||
/// - timeout: 60 seconds
|
/// - timeout: 60 seconds
|
||||||
/// - keepAlive: true
|
/// - keepAlive: true
|
||||||
/// - maxKeepAliveRequests: 0
|
/// - maxKeepAliveRequests: 100
|
||||||
/// - keepAliveTimeout: 15 seconds
|
/// - keepAliveTimeout: 15 seconds
|
||||||
|
|
||||||
void setServerName(const std::string & serverName);
|
void setServerName(const std::string & serverName);
|
||||||
@ -87,12 +87,12 @@ namespace Net
|
|||||||
const Poco::Timespan & getKeepAliveTimeout() const;
|
const Poco::Timespan & getKeepAliveTimeout() const;
|
||||||
/// Returns the connection timeout for HTTP connections.
|
/// Returns the connection timeout for HTTP connections.
|
||||||
|
|
||||||
void setMaxKeepAliveRequests(int maxKeepAliveRequests);
|
void setMaxKeepAliveRequests(size_t maxKeepAliveRequests);
|
||||||
/// Specifies the maximum number of requests allowed
|
/// Specifies the maximum number of requests allowed
|
||||||
/// during a persistent connection. 0 means unlimited
|
/// during a persistent connection. 0 means unlimited
|
||||||
/// connections.
|
/// connections.
|
||||||
|
|
||||||
int getMaxKeepAliveRequests() const;
|
size_t getMaxKeepAliveRequests() const;
|
||||||
/// Returns the maximum number of requests allowed
|
/// Returns the maximum number of requests allowed
|
||||||
/// during a persistent connection, or 0 if
|
/// during a persistent connection, or 0 if
|
||||||
/// unlimited connections are allowed.
|
/// unlimited connections are allowed.
|
||||||
@ -106,7 +106,7 @@ namespace Net
|
|||||||
std::string _softwareVersion;
|
std::string _softwareVersion;
|
||||||
Poco::Timespan _timeout;
|
Poco::Timespan _timeout;
|
||||||
bool _keepAlive;
|
bool _keepAlive;
|
||||||
int _maxKeepAliveRequests;
|
size_t _maxKeepAliveRequests;
|
||||||
Poco::Timespan _keepAliveTimeout;
|
Poco::Timespan _keepAliveTimeout;
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -138,7 +138,7 @@ namespace Net
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
inline int HTTPServerParams::getMaxKeepAliveRequests() const
|
inline size_t HTTPServerParams::getMaxKeepAliveRequests() const
|
||||||
{
|
{
|
||||||
return _maxKeepAliveRequests;
|
return _maxKeepAliveRequests;
|
||||||
}
|
}
|
||||||
|
@ -65,7 +65,7 @@ namespace Net
|
|||||||
private:
|
private:
|
||||||
bool _firstRequest;
|
bool _firstRequest;
|
||||||
Poco::Timespan _keepAliveTimeout;
|
Poco::Timespan _keepAliveTimeout;
|
||||||
int _maxKeepAliveRequests;
|
size_t _maxKeepAliveRequests;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
||||||
@ -74,7 +74,7 @@ namespace Net
|
|||||||
//
|
//
|
||||||
inline bool HTTPServerSession::canKeepAlive() const
|
inline bool HTTPServerSession::canKeepAlive() const
|
||||||
{
|
{
|
||||||
return _maxKeepAliveRequests != 0;
|
return getKeepAlive() && _maxKeepAliveRequests > 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -22,7 +22,7 @@ namespace Net {
|
|||||||
HTTPServerParams::HTTPServerParams():
|
HTTPServerParams::HTTPServerParams():
|
||||||
_timeout(60000000),
|
_timeout(60000000),
|
||||||
_keepAlive(true),
|
_keepAlive(true),
|
||||||
_maxKeepAliveRequests(0),
|
_maxKeepAliveRequests(100),
|
||||||
_keepAliveTimeout(15000000)
|
_keepAliveTimeout(15000000)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
@ -32,12 +32,12 @@ HTTPServerParams::~HTTPServerParams()
|
|||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void HTTPServerParams::setServerName(const std::string& serverName)
|
void HTTPServerParams::setServerName(const std::string& serverName)
|
||||||
{
|
{
|
||||||
_serverName = serverName;
|
_serverName = serverName;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void HTTPServerParams::setSoftwareVersion(const std::string& softwareVersion)
|
void HTTPServerParams::setSoftwareVersion(const std::string& softwareVersion)
|
||||||
{
|
{
|
||||||
@ -50,24 +50,24 @@ void HTTPServerParams::setTimeout(const Poco::Timespan& timeout)
|
|||||||
_timeout = timeout;
|
_timeout = timeout;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void HTTPServerParams::setKeepAlive(bool keepAlive)
|
void HTTPServerParams::setKeepAlive(bool keepAlive)
|
||||||
{
|
{
|
||||||
_keepAlive = keepAlive;
|
_keepAlive = keepAlive;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void HTTPServerParams::setKeepAliveTimeout(const Poco::Timespan& timeout)
|
void HTTPServerParams::setKeepAliveTimeout(const Poco::Timespan& timeout)
|
||||||
{
|
{
|
||||||
_keepAliveTimeout = timeout;
|
_keepAliveTimeout = timeout;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
void HTTPServerParams::setMaxKeepAliveRequests(int maxKeepAliveRequests)
|
void HTTPServerParams::setMaxKeepAliveRequests(size_t maxKeepAliveRequests)
|
||||||
{
|
{
|
||||||
poco_assert (maxKeepAliveRequests >= 0);
|
poco_assert (maxKeepAliveRequests >= 0);
|
||||||
_maxKeepAliveRequests = maxKeepAliveRequests;
|
_maxKeepAliveRequests = maxKeepAliveRequests;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
} } // namespace Poco::Net
|
} } // namespace Poco::Net
|
||||||
|
@ -50,14 +50,14 @@ bool HTTPServerSession::hasMoreRequests()
|
|||||||
--_maxKeepAliveRequests;
|
--_maxKeepAliveRequests;
|
||||||
return socket().poll(getTimeout(), Socket::SELECT_READ);
|
return socket().poll(getTimeout(), Socket::SELECT_READ);
|
||||||
}
|
}
|
||||||
else if (_maxKeepAliveRequests != 0 && getKeepAlive())
|
else if (canKeepAlive())
|
||||||
{
|
{
|
||||||
if (_maxKeepAliveRequests > 0)
|
if (_maxKeepAliveRequests > 0)
|
||||||
--_maxKeepAliveRequests;
|
--_maxKeepAliveRequests;
|
||||||
return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ);
|
return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ);
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -18,7 +18,6 @@
|
|||||||
|
|
||||||
|
|
||||||
using Poco::Exception;
|
using Poco::Exception;
|
||||||
using Poco::ErrorHandler;
|
|
||||||
|
|
||||||
|
|
||||||
namespace Poco {
|
namespace Poco {
|
||||||
@ -31,9 +30,7 @@ TCPServerConnection::TCPServerConnection(const StreamSocket& socket):
|
|||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
TCPServerConnection::~TCPServerConnection()
|
TCPServerConnection::~TCPServerConnection() = default;
|
||||||
{
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
void TCPServerConnection::start()
|
void TCPServerConnection::start()
|
||||||
|
@ -3131,3 +3131,4 @@ DistributedCachePoolBehaviourOnLimit
|
|||||||
SharedJoin
|
SharedJoin
|
||||||
ShareSet
|
ShareSet
|
||||||
unacked
|
unacked
|
||||||
|
BFloat
|
||||||
|
@ -74,6 +74,7 @@ elseif (ARCH_AARCH64)
|
|||||||
# introduced as optional, either in v8.2 [7] or in v8.4 [8].
|
# introduced as optional, either in v8.2 [7] or in v8.4 [8].
|
||||||
# rcpc: Load-Acquire RCpc Register. Better support of release/acquire of atomics. Good for allocators and high contention code.
|
# rcpc: Load-Acquire RCpc Register. Better support of release/acquire of atomics. Good for allocators and high contention code.
|
||||||
# Optional in v8.2, mandatory in v8.3 [9]. Supported in Graviton >=2, Azure and GCP instances.
|
# Optional in v8.2, mandatory in v8.3 [9]. Supported in Graviton >=2, Azure and GCP instances.
|
||||||
|
# bf16: Bfloat16, a half-precision floating point format developed by Google Brain. Optional in v8.2, mandatory in v8.6.
|
||||||
#
|
#
|
||||||
# [1] https://github.com/aws/aws-graviton-getting-started/blob/main/c-c%2B%2B.md
|
# [1] https://github.com/aws/aws-graviton-getting-started/blob/main/c-c%2B%2B.md
|
||||||
# [2] https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10
|
# [2] https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10
|
||||||
@ -85,7 +86,7 @@ elseif (ARCH_AARCH64)
|
|||||||
# [8] https://developer.arm.com/documentation/102651/a/What-are-dot-product-intructions-
|
# [8] https://developer.arm.com/documentation/102651/a/What-are-dot-product-intructions-
|
||||||
# [9] https://developer.arm.com/documentation/dui0801/g/A64-Data-Transfer-Instructions/LDAPR?lang=en
|
# [9] https://developer.arm.com/documentation/dui0801/g/A64-Data-Transfer-Instructions/LDAPR?lang=en
|
||||||
# [10] https://github.com/aws/aws-graviton-getting-started/blob/main/README.md
|
# [10] https://github.com/aws/aws-graviton-getting-started/blob/main/README.md
|
||||||
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc")
|
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc+bf16")
|
||||||
endif ()
|
endif ()
|
||||||
|
|
||||||
# Best-effort check: The build generates and executes intermediate binaries, e.g. protoc and llvm-tablegen. If we build on ARM for ARM
|
# Best-effort check: The build generates and executes intermediate binaries, e.g. protoc and llvm-tablegen. If we build on ARM for ARM
|
||||||
|
@ -3,8 +3,7 @@
|
|||||||
|
|
||||||
set (DEFAULT_LIBS "-nodefaultlibs")
|
set (DEFAULT_LIBS "-nodefaultlibs")
|
||||||
|
|
||||||
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
|
# We need builtins from Clang
|
||||||
# See https://bugs.llvm.org/show_bug.cgi?id=16404
|
|
||||||
execute_process (COMMAND
|
execute_process (COMMAND
|
||||||
${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt
|
${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt
|
||||||
OUTPUT_VARIABLE BUILTINS_LIBRARY
|
OUTPUT_VARIABLE BUILTINS_LIBRARY
|
||||||
|
4
contrib/CMakeLists.txt
vendored
@ -217,7 +217,9 @@ add_contrib (libssh-cmake libssh)
|
|||||||
|
|
||||||
add_contrib (prometheus-protobufs-cmake prometheus-protobufs prometheus-protobufs-gogo)
|
add_contrib (prometheus-protobufs-cmake prometheus-protobufs prometheus-protobufs-gogo)
|
||||||
|
|
||||||
add_contrib(numactl-cmake numactl)
|
add_contrib (numactl-cmake numactl)
|
||||||
|
|
||||||
|
add_contrib (jwt-cpp-cmake jwt-cpp)
|
||||||
|
|
||||||
# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
|
# Put all targets defined here and in subdirectories under "contrib/<immediate-subdir>" folders in GUI-based IDEs.
|
||||||
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
|
# Some of third-party projects may override CMAKE_FOLDER or FOLDER property of their targets, so they would not appear
|
||||||
|
1
contrib/jwt-cpp
vendored
Submodule
@ -0,0 +1 @@
|
|||||||
|
Subproject commit a6927cb8140858c34e05d1a954626b9849fbcdfc
|
23
contrib/jwt-cpp-cmake/CMakeLists.txt
Normal file
@ -0,0 +1,23 @@
|
|||||||
|
set(ENABLE_JWT_CPP_DEFAULT OFF)
|
||||||
|
if(ENABLE_LIBRARIES AND CLICKHOUSE_CLOUD)
|
||||||
|
set(ENABLE_JWT_CPP_DEFAULT ON)
|
||||||
|
endif()
|
||||||
|
|
||||||
|
option(ENABLE_JWT_CPP "Enable jwt-cpp library" ${ENABLE_JWT_CPP_DEFAULT})
|
||||||
|
|
||||||
|
if (NOT ENABLE_JWT_CPP)
|
||||||
|
message(STATUS "Not using jwt-cpp")
|
||||||
|
return()
|
||||||
|
endif()
|
||||||
|
|
||||||
|
if(ENABLE_JWT_CPP)
|
||||||
|
if(NOT TARGET OpenSSL::Crypto)
|
||||||
|
message (${RECONFIGURE_MESSAGE_LEVEL} "Can't use jwt-cpp without OpenSSL")
|
||||||
|
endif()
|
||||||
|
endif()
|
||||||
|
|
||||||
|
set (JWT_CPP_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/jwt-cpp/include")
|
||||||
|
|
||||||
|
add_library (_jwt-cpp INTERFACE)
|
||||||
|
target_include_directories(_jwt-cpp SYSTEM BEFORE INTERFACE ${JWT_CPP_INCLUDE_DIR})
|
||||||
|
add_library(ch_contrib::jwt-cpp ALIAS _jwt-cpp)
|
@ -31,14 +31,14 @@ COPY entrypoint.sh /entrypoint.sh
|
|||||||
ARG TARGETARCH
|
ARG TARGETARCH
|
||||||
RUN arch=${TARGETARCH:-amd64} \
|
RUN arch=${TARGETARCH:-amd64} \
|
||||||
&& case $arch in \
|
&& case $arch in \
|
||||||
amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.31.so /lib64/ld-linux-x86-64.so.2 ;; \
|
amd64) mkdir -p /lib64 && ln -sf /lib/ld-2.35.so /lib64/ld-linux-x86-64.so.2 ;; \
|
||||||
arm64) ln -sf /lib/ld-2.31.so /lib/ld-linux-aarch64.so.1 ;; \
|
arm64) ln -sf /lib/ld-2.35.so /lib/ld-linux-aarch64.so.1 ;; \
|
||||||
esac
|
esac
|
||||||
|
|
||||||
# lts / testing / prestable / etc
|
# lts / testing / prestable / etc
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
||||||
ARG VERSION="24.10.1.2812"
|
ARG VERSION="24.10.2.80"
|
||||||
ARG PACKAGES="clickhouse-keeper"
|
ARG PACKAGES="clickhouse-keeper"
|
||||||
ARG DIRECT_DOWNLOAD_URLS=""
|
ARG DIRECT_DOWNLOAD_URLS=""
|
||||||
|
|
||||||
@ -86,7 +86,8 @@ RUN arch=${TARGETARCH:-amd64} \
|
|||||||
ARG DEFAULT_CONFIG_DIR="/etc/clickhouse-keeper"
|
ARG DEFAULT_CONFIG_DIR="/etc/clickhouse-keeper"
|
||||||
ARG DEFAULT_DATA_DIR="/var/lib/clickhouse-keeper"
|
ARG DEFAULT_DATA_DIR="/var/lib/clickhouse-keeper"
|
||||||
ARG DEFAULT_LOG_DIR="/var/log/clickhouse-keeper"
|
ARG DEFAULT_LOG_DIR="/var/log/clickhouse-keeper"
|
||||||
RUN mkdir -p "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}" \
|
RUN clickhouse-keeper --version \
|
||||||
|
&& mkdir -p "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}" \
|
||||||
&& chown clickhouse:clickhouse "${DEFAULT_DATA_DIR}" \
|
&& chown clickhouse:clickhouse "${DEFAULT_DATA_DIR}" \
|
||||||
&& chown root:clickhouse "${DEFAULT_LOG_DIR}" \
|
&& chown root:clickhouse "${DEFAULT_LOG_DIR}" \
|
||||||
&& chmod ugo+Xrw -R "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}"
|
&& chmod ugo+Xrw -R "${DEFAULT_DATA_DIR}" "${DEFAULT_LOG_DIR}" "${DEFAULT_CONFIG_DIR}"
|
||||||
|
@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \
|
|||||||
# lts / testing / prestable / etc
|
# lts / testing / prestable / etc
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
|
||||||
ARG VERSION="24.10.1.2812"
|
ARG VERSION="24.10.2.80"
|
||||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||||
ARG DIRECT_DOWNLOAD_URLS=""
|
ARG DIRECT_DOWNLOAD_URLS=""
|
||||||
|
|
||||||
|
@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
|
|||||||
|
|
||||||
ARG REPO_CHANNEL="stable"
|
ARG REPO_CHANNEL="stable"
|
||||||
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
|
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
|
||||||
ARG VERSION="24.10.1.2812"
|
ARG VERSION="24.10.2.80"
|
||||||
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
|
||||||
|
|
||||||
#docker-official-library:off
|
#docker-official-library:off
|
||||||
|
61
docs/changelogs/v24.10.2.80-stable.md
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2024
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2024 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v24.10.2.80-stable (96b80057159) FIXME as compared to v24.10.1.2812-stable (9cd0a3738d5)
|
||||||
|
|
||||||
|
#### Backward Incompatible Change
|
||||||
|
* Backported in [#71363](https://github.com/ClickHouse/ClickHouse/issues/71363): Fix possible error `No such file or directory` due to unescaped special symbols in files for JSON subcolumns. [#71182](https://github.com/ClickHouse/ClickHouse/pull/71182) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||||
|
|
||||||
|
#### Performance Improvement
|
||||||
|
* Backported in [#71852](https://github.com/ClickHouse/ClickHouse/issues/71852): Improve the performance and accuracy of system.query_metric_log collection interval by reducing the critical region. [#71473](https://github.com/ClickHouse/ClickHouse/pull/71473) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Backported in [#71495](https://github.com/ClickHouse/ClickHouse/issues/71495): Enable `parallel_replicas_local_plan` by default. Building a full-fledged local plan on the query initiator improves parallel replicas performance with less resource consumption, provides opportunities to apply more query optimizations. [#70171](https://github.com/ClickHouse/ClickHouse/pull/70171) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Backported in [#71985](https://github.com/ClickHouse/ClickHouse/issues/71985): Fixes RIGHT / FULL joins in queries with parallel replicas. Now, RIGHT joins can be executed with parallel replicas (right table reading is distributed). FULL joins can't be parallelized among nodes, - executed locally. [#71162](https://github.com/ClickHouse/ClickHouse/pull/71162) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||||
|
* Backported in [#71670](https://github.com/ClickHouse/ClickHouse/issues/71670): When user/group is given as ID, the `clickhouse su` fails. This patch fixes it to accept `UID:GID` as well. ### Documentation entry for user-facing changes. [#71626](https://github.com/ClickHouse/ClickHouse/pull/71626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Backported in [#71940](https://github.com/ClickHouse/ClickHouse/issues/71940): Update `HostResolver` 3 times in a `history` period. [#71863](https://github.com/ClickHouse/ClickHouse/pull/71863) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
* Backported in [#71922](https://github.com/ClickHouse/ClickHouse/issues/71922): Allow_reorder_prewhere_conditions is on by default with old compatibility settings. [#71867](https://github.com/ClickHouse/ClickHouse/pull/71867) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
* Backported in [#71588](https://github.com/ClickHouse/ClickHouse/issues/71588): Fix mismatched aggreage function name of quantileExactWeightedInterpolated. The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/69619. cc @Algunenano. [#71168](https://github.com/ClickHouse/ClickHouse/pull/71168) ([李扬](https://github.com/taiyang-li)).
|
||||||
|
* Backported in [#71357](https://github.com/ClickHouse/ClickHouse/issues/71357): Fix bad_weak_ptr exception with Dynamic in functions comparison. [#71183](https://github.com/ClickHouse/ClickHouse/pull/71183) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||||
|
* Backported in [#71467](https://github.com/ClickHouse/ClickHouse/issues/71467): Fix bug of memory usage increase if enable_filesystem_cache=1, but disk in storage configuration did not have any cache configuration. [#71261](https://github.com/ClickHouse/ClickHouse/pull/71261) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Backported in [#71355](https://github.com/ClickHouse/ClickHouse/issues/71355): Fix possible error "Cannot read all data" erros during deserialization of LowCardinality dictionary from Dynamic column. [#71299](https://github.com/ClickHouse/ClickHouse/pull/71299) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||||
|
* Backported in [#71324](https://github.com/ClickHouse/ClickHouse/issues/71324): Fix incomplete cleanup of parallel output format in the client. [#71304](https://github.com/ClickHouse/ClickHouse/pull/71304) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71466](https://github.com/ClickHouse/ClickHouse/issues/71466): Added missing unescaping in named collections. Without fix clickhouse-server can't start. [#71308](https://github.com/ClickHouse/ClickHouse/pull/71308) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
|
||||||
|
* Backported in [#71393](https://github.com/ClickHouse/ClickHouse/issues/71393): Fix inconsistent AST formatting when granting wrong wildcard grants [#71309](https://github.com/ClickHouse/ClickHouse/issues/71309). [#71332](https://github.com/ClickHouse/ClickHouse/pull/71332) ([pufit](https://github.com/pufit)).
|
||||||
|
* Backported in [#71379](https://github.com/ClickHouse/ClickHouse/issues/71379): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)).
|
||||||
|
* Backported in [#71751](https://github.com/ClickHouse/ClickHouse/issues/71751): Check suspicious and experimental types in JSON type hints. [#71369](https://github.com/ClickHouse/ClickHouse/pull/71369) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||||
|
* Backported in [#71451](https://github.com/ClickHouse/ClickHouse/issues/71451): Start memory worker thread on non-Linux OS too (fixes [#71051](https://github.com/ClickHouse/ClickHouse/issues/71051)). [#71384](https://github.com/ClickHouse/ClickHouse/pull/71384) ([Alexandre Snarskii](https://github.com/snar)).
|
||||||
|
* Backported in [#71608](https://github.com/ClickHouse/ClickHouse/issues/71608): Fix error Invalid number of rows in Chunk with Variant column. [#71388](https://github.com/ClickHouse/ClickHouse/pull/71388) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||||
|
* Backported in [#71493](https://github.com/ClickHouse/ClickHouse/issues/71493): Fix crash in `mongodb` table function when passing wrong arguments (e.g. `NULL`). [#71426](https://github.com/ClickHouse/ClickHouse/pull/71426) ([Vladimir Cherkasov](https://github.com/vdimir)).
|
||||||
|
* Backported in [#71815](https://github.com/ClickHouse/ClickHouse/issues/71815): Fix crash with optimize_rewrite_array_exists_to_has. [#71432](https://github.com/ClickHouse/ClickHouse/pull/71432) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71521](https://github.com/ClickHouse/ClickHouse/issues/71521): Fix possible error `Argument for function must be constant` (old analyzer) in case when arrayJoin can apparently appear in `WHERE` condition. Regression after https://github.com/ClickHouse/ClickHouse/pull/65414. [#71476](https://github.com/ClickHouse/ClickHouse/pull/71476) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#71555](https://github.com/ClickHouse/ClickHouse/issues/71555): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71618](https://github.com/ClickHouse/ClickHouse/issues/71618): Analyzer fix when query inside materialized view uses IN with CTE. Closes [#65598](https://github.com/ClickHouse/ClickHouse/issues/65598). [#71538](https://github.com/ClickHouse/ClickHouse/pull/71538) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Backported in [#71570](https://github.com/ClickHouse/ClickHouse/issues/71570): Avoid crash when using a UDF in a constraint. [#71541](https://github.com/ClickHouse/ClickHouse/pull/71541) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71646](https://github.com/ClickHouse/ClickHouse/issues/71646): Return 0 or default char instead of throwing an error in bitShift functions in case of out of bounds. [#71580](https://github.com/ClickHouse/ClickHouse/pull/71580) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
* Backported in [#71880](https://github.com/ClickHouse/ClickHouse/issues/71880): Fix LOGICAL_ERROR when doing ALTER with empty tuple. This fixes [#71647](https://github.com/ClickHouse/ClickHouse/issues/71647). [#71679](https://github.com/ClickHouse/ClickHouse/pull/71679) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Backported in [#71741](https://github.com/ClickHouse/ClickHouse/issues/71741): Don't transform constant set in predicates over partition columns in case of NOT IN operator. [#71695](https://github.com/ClickHouse/ClickHouse/pull/71695) ([Eduard Karacharov](https://github.com/korowa)).
|
||||||
|
* Backported in [#72012](https://github.com/ClickHouse/ClickHouse/issues/72012): Fix exception for toDayOfWeek on WHERE condition with primary key of DateTime64 type. [#71849](https://github.com/ClickHouse/ClickHouse/pull/71849) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
|
||||||
|
* Backported in [#71897](https://github.com/ClickHouse/ClickHouse/issues/71897): Fixed filling of defaults after parsing into sparse columns. [#71854](https://github.com/ClickHouse/ClickHouse/pull/71854) ([Anton Popov](https://github.com/CurtizJ)).
|
||||||
|
* Backported in [#71955](https://github.com/ClickHouse/ClickHouse/issues/71955): Fix data race between the progress indicator and the progress table in clickhouse-client. This issue is visible when FROM INFILE is used. Intercept keystrokes during INSERT queries to toggle progress table display. [#71901](https://github.com/ClickHouse/ClickHouse/pull/71901) ([Julia Kartseva](https://github.com/jkartseva)).
|
||||||
|
* Backported in [#72006](https://github.com/ClickHouse/ClickHouse/issues/72006): Fix a crash in clickhouse-client syntax highlighting. Closes [#71864](https://github.com/ClickHouse/ClickHouse/issues/71864). [#71949](https://github.com/ClickHouse/ClickHouse/pull/71949) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Backported in [#71692](https://github.com/ClickHouse/ClickHouse/issues/71692): Improve clickhouse-server Dockerfile.ubuntu. Deprecate `CLICKHOUSE_UID/CLICKHOUSE_GID` envs. Remove `CLICKHOUSE_DOCKER_RESTART_ON_EXIT` processing to complien requirements. Consistent `clickhouse/clickhouse-server/clickhouse-keeper` execution to not have it plain in one place and `/usr/bin/clickhouse*` in another. [#71573](https://github.com/ClickHouse/ClickHouse/pull/71573) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Backported in [#71387](https://github.com/ClickHouse/ClickHouse/issues/71387): Remove bad test `test_system_replicated_fetches`. [#71071](https://github.com/ClickHouse/ClickHouse/pull/71071) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
|
||||||
|
* Backported in [#71586](https://github.com/ClickHouse/ClickHouse/issues/71586): Fix `WITH TOTALS` in subquery with parallel replicas. [#71224](https://github.com/ClickHouse/ClickHouse/pull/71224) ([Nikita Taranov](https://github.com/nickitat)).
|
||||||
|
* Backported in [#71437](https://github.com/ClickHouse/ClickHouse/issues/71437): Ignore `No such key` exceptions in some cases. [#71236](https://github.com/ClickHouse/ClickHouse/pull/71236) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||||
|
* Backported in [#71629](https://github.com/ClickHouse/ClickHouse/issues/71629): Fix compatibility with refreshable materialized views created by old clickhouse servers. [#71556](https://github.com/ClickHouse/ClickHouse/pull/71556) ([Michael Kolupaev](https://github.com/al13n321)).
|
||||||
|
* Backported in [#71805](https://github.com/ClickHouse/ClickHouse/issues/71805): Fix issues we face on orphane backport branches and closed release PRs, when fake-master events are sent to the check DB. [#71782](https://github.com/ClickHouse/ClickHouse/pull/71782) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Backported in [#71832](https://github.com/ClickHouse/ClickHouse/issues/71832): Closes [#71780](https://github.com/ClickHouse/ClickHouse/issues/71780). [#71818](https://github.com/ClickHouse/ClickHouse/pull/71818) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||||
|
* Backported in [#71840](https://github.com/ClickHouse/ClickHouse/issues/71840): The change has already been applied to https://github.com/docker-library/official-images/pull/17876. Backport it to every branch to have a proper `Dockerfile.ubuntu` there. [#71825](https://github.com/ClickHouse/ClickHouse/pull/71825) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
31
docs/changelogs/v24.3.14.35-lts.md
Normal file
@ -0,0 +1,31 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2024
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2024 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v24.3.14.35-lts (cfa4e62b775) FIXME as compared to v24.3.13.40-lts (7acabd77389)
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Backported in [#71711](https://github.com/ClickHouse/ClickHouse/issues/71711): CLICKHOUSE_PASSWORD is escaped for XML in clickhouse image's entrypoint. [#69301](https://github.com/ClickHouse/ClickHouse/pull/69301) ([aohoyd](https://github.com/aohoyd)).
|
||||||
|
* Backported in [#71662](https://github.com/ClickHouse/ClickHouse/issues/71662): When user/group is given as ID, the `clickhouse su` fails. This patch fixes it to accept `UID:GID` as well. ### Documentation entry for user-facing changes. [#71626](https://github.com/ClickHouse/ClickHouse/pull/71626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
* Backported in [#65755](https://github.com/ClickHouse/ClickHouse/issues/65755): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN.`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#71600](https://github.com/ClickHouse/ClickHouse/issues/71600): Fix error Invalid number of rows in Chunk with Variant column. [#71388](https://github.com/ClickHouse/ClickHouse/pull/71388) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||||
|
* Backported in [#71842](https://github.com/ClickHouse/ClickHouse/issues/71842): Fix crash with optimize_rewrite_array_exists_to_has. [#71432](https://github.com/ClickHouse/ClickHouse/pull/71432) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71562](https://github.com/ClickHouse/ClickHouse/issues/71562): Avoid crash when using a UDF in a constraint. [#71541](https://github.com/ClickHouse/ClickHouse/pull/71541) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71731](https://github.com/ClickHouse/ClickHouse/issues/71731): Return 0 or default char instead of throwing an error in bitShift functions in case of out of bounds. [#71580](https://github.com/ClickHouse/ClickHouse/pull/71580) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Backported in [#71697](https://github.com/ClickHouse/ClickHouse/issues/71697): Vendor in rust dependencies. [#62297](https://github.com/ClickHouse/ClickHouse/pull/62297) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71688](https://github.com/ClickHouse/ClickHouse/issues/71688): Improve clickhouse-server Dockerfile.ubuntu. Deprecate `CLICKHOUSE_UID/CLICKHOUSE_GID` envs. Remove `CLICKHOUSE_DOCKER_RESTART_ON_EXIT` processing to comply with requirements. Consistent `clickhouse/clickhouse-server/clickhouse-keeper` execution to not have it plain in one place and `/usr/bin/clickhouse*` in another. [#71573](https://github.com/ClickHouse/ClickHouse/pull/71573) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Backported in [#71808](https://github.com/ClickHouse/ClickHouse/issues/71808): Fix issues we face on orphaned backport branches and closed release PRs, when fake-master events are sent to the check DB. [#71782](https://github.com/ClickHouse/ClickHouse/pull/71782) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Backported in [#71834](https://github.com/ClickHouse/ClickHouse/issues/71834): The change has already been applied to https://github.com/docker-library/official-images/pull/17876. Backport it to every branch to have a proper `Dockerfile.ubuntu` there. [#71825](https://github.com/ClickHouse/ClickHouse/pull/71825) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Fix bitShift test after backport. [#71861](https://github.com/ClickHouse/ClickHouse/pull/71861) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
* Revert "Merge pull request [#71861](https://github.com/ClickHouse/ClickHouse/issues/71861) from pamarcos/fix-bitshift-test". [#71871](https://github.com/ClickHouse/ClickHouse/pull/71871) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
|
37
docs/changelogs/v24.8.7.41-lts.md
Normal file
@ -0,0 +1,37 @@
|
|||||||
|
---
|
||||||
|
sidebar_position: 1
|
||||||
|
sidebar_label: 2024
|
||||||
|
---
|
||||||
|
|
||||||
|
# 2024 Changelog
|
||||||
|
|
||||||
|
### ClickHouse release v24.8.7.41-lts (e28553d4f2b) FIXME as compared to v24.8.6.70-lts (ddb8c219771)
|
||||||
|
|
||||||
|
#### Improvement
|
||||||
|
* Backported in [#71713](https://github.com/ClickHouse/ClickHouse/issues/71713): CLICKHOUSE_PASSWORD is escaped for XML in clickhouse image's entrypoint. [#69301](https://github.com/ClickHouse/ClickHouse/pull/69301) ([aohoyd](https://github.com/aohoyd)).
|
||||||
|
* Backported in [#71666](https://github.com/ClickHouse/ClickHouse/issues/71666): When user/group is given as ID, the `clickhouse su` fails. This patch fixes it to accept `UID:GID` as well. ### Documentation entry for user-facing changes. [#71626](https://github.com/ClickHouse/ClickHouse/pull/71626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Backported in [#71936](https://github.com/ClickHouse/ClickHouse/issues/71936): Update `HostResolver` 3 times in a `history` period. [#71863](https://github.com/ClickHouse/ClickHouse/pull/71863) ([Sema Checherinda](https://github.com/CheSema)).
|
||||||
|
|
||||||
|
#### Bug Fix (user-visible misbehavior in an official stable release)
|
||||||
|
* Backported in [#71486](https://github.com/ClickHouse/ClickHouse/issues/71486): Fix `Content-Encoding` not sent in some compressed responses. [#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)).
|
||||||
|
* Backported in [#71462](https://github.com/ClickHouse/ClickHouse/issues/71462): Added missing unescaping in named collections. Without fix clickhouse-server can't start. [#71308](https://github.com/ClickHouse/ClickHouse/pull/71308) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
|
||||||
|
* Backported in [#71747](https://github.com/ClickHouse/ClickHouse/issues/71747): Check suspicious and experimental types in JSON type hints. [#71369](https://github.com/ClickHouse/ClickHouse/pull/71369) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||||
|
* Backported in [#71604](https://github.com/ClickHouse/ClickHouse/issues/71604): Fix error Invalid number of rows in Chunk with Variant column. [#71388](https://github.com/ClickHouse/ClickHouse/pull/71388) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||||
|
* Backported in [#71826](https://github.com/ClickHouse/ClickHouse/issues/71826): Fix crash with optimize_rewrite_array_exists_to_has. [#71432](https://github.com/ClickHouse/ClickHouse/pull/71432) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71517](https://github.com/ClickHouse/ClickHouse/issues/71517): Fix possible error `Argument for function must be constant` (old analyzer) in case when arrayJoin can apparently appear in `WHERE` condition. Regression after https://github.com/ClickHouse/ClickHouse/pull/65414. [#71476](https://github.com/ClickHouse/ClickHouse/pull/71476) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
|
||||||
|
* Backported in [#71551](https://github.com/ClickHouse/ClickHouse/issues/71551): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71614](https://github.com/ClickHouse/ClickHouse/issues/71614): Analyzer fix when query inside materialized view uses IN with CTE. Closes [#65598](https://github.com/ClickHouse/ClickHouse/issues/65598). [#71538](https://github.com/ClickHouse/ClickHouse/pull/71538) ([Maksim Kita](https://github.com/kitaisreal)).
|
||||||
|
* Backported in [#71566](https://github.com/ClickHouse/ClickHouse/issues/71566): Avoid crash when using a UDF in a constraint. [#71541](https://github.com/ClickHouse/ClickHouse/pull/71541) ([Raúl Marín](https://github.com/Algunenano)).
|
||||||
|
* Backported in [#71727](https://github.com/ClickHouse/ClickHouse/issues/71727): Return 0 or default char instead of throwing an error in bitShift functions in case of out of bounds. [#71580](https://github.com/ClickHouse/ClickHouse/pull/71580) ([Pablo Marcos](https://github.com/pamarcos)).
|
||||||
|
* Backported in [#71876](https://github.com/ClickHouse/ClickHouse/issues/71876): Fix LOGICAL_ERROR when doing ALTER with empty tuple. This fixes [#71647](https://github.com/ClickHouse/ClickHouse/issues/71647). [#71679](https://github.com/ClickHouse/ClickHouse/pull/71679) ([Amos Bird](https://github.com/amosbird)).
|
||||||
|
* Backported in [#71737](https://github.com/ClickHouse/ClickHouse/issues/71737): Don't transform constant set in predicates over partition columns in case of NOT IN operator. [#71695](https://github.com/ClickHouse/ClickHouse/pull/71695) ([Eduard Karacharov](https://github.com/korowa)).
|
||||||
|
* Backported in [#72002](https://github.com/ClickHouse/ClickHouse/issues/72002): Fix a crash in clickhouse-client syntax highlighting. Closes [#71864](https://github.com/ClickHouse/ClickHouse/issues/71864). [#71949](https://github.com/ClickHouse/ClickHouse/pull/71949) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||||
|
|
||||||
|
#### Build/Testing/Packaging Improvement
|
||||||
|
* Backported in [#71690](https://github.com/ClickHouse/ClickHouse/issues/71690): Improve clickhouse-server Dockerfile.ubuntu. Deprecate `CLICKHOUSE_UID/CLICKHOUSE_GID` envs. Remove `CLICKHOUSE_DOCKER_RESTART_ON_EXIT` processing to comply with requirements. Consistent `clickhouse/clickhouse-server/clickhouse-keeper` execution to not have it plain in one place and `/usr/bin/clickhouse*` in another. [#71573](https://github.com/ClickHouse/ClickHouse/pull/71573) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
||||||
|
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||||
|
|
||||||
|
* Backported in [#71801](https://github.com/ClickHouse/ClickHouse/issues/71801): Fix issues we face on orphaned backport branches and closed release PRs, when fake-master events are sent to the check DB. [#71782](https://github.com/ClickHouse/ClickHouse/pull/71782) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
* Backported in [#71836](https://github.com/ClickHouse/ClickHouse/issues/71836): The change has already been applied to https://github.com/docker-library/official-images/pull/17876. Backport it to every branch to have a proper `Dockerfile.ubuntu` there. [#71825](https://github.com/ClickHouse/ClickHouse/pull/71825) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||||
|
|
88
docs/changelogs/v24.9.3.128-stable.md
Normal file
@ -122,7 +122,7 @@ Default value: `0`.
|
|||||||
|
|
||||||
### s3queue_polling_min_timeout_ms {#polling_min_timeout_ms}
|
### s3queue_polling_min_timeout_ms {#polling_min_timeout_ms}
|
||||||
|
|
||||||
Minimal timeout before next polling (in milliseconds).
|
Specifies the minimum time, in milliseconds, that ClickHouse waits before making the next polling attempt.
|
||||||
|
|
||||||
Possible values:
|
Possible values:
|
||||||
|
|
||||||
@ -132,7 +132,7 @@ Default value: `1000`.
|
|||||||
|
|
||||||
### s3queue_polling_max_timeout_ms {#polling_max_timeout_ms}
|
### s3queue_polling_max_timeout_ms {#polling_max_timeout_ms}
|
||||||
|
|
||||||
Maximum timeout before next polling (in milliseconds).
|
Defines the maximum time, in milliseconds, that ClickHouse waits before initiating the next polling attempt.
|
||||||
|
|
||||||
Possible values:
|
Possible values:
|
||||||
|
|
||||||
@ -142,7 +142,7 @@ Default value: `10000`.
|
|||||||
|
|
||||||
### s3queue_polling_backoff_ms {#polling_backoff_ms}
|
### s3queue_polling_backoff_ms {#polling_backoff_ms}
|
||||||
|
|
||||||
Polling backoff (in milliseconds).
|
Determines the additional wait time added to the previous polling interval when no new files are found. The next poll occurs after the sum of the previous interval and this backoff value, or the maximum interval, whichever is lower.
|
||||||
|
|
||||||
Possible values:
|
Possible values:
|
||||||
|
|
||||||
|
@ -10,6 +10,11 @@ The engine inherits from [MergeTree](../../../engines/table-engines/mergetree-fa
|
|||||||
|
|
||||||
You can use `AggregatingMergeTree` tables for incremental data aggregation, including for aggregated materialized views.
|
You can use `AggregatingMergeTree` tables for incremental data aggregation, including for aggregated materialized views.
|
||||||
|
|
||||||
|
You can see an example of how to use the AggregatingMergeTree and Aggregate functions in the below video:
|
||||||
|
<div class='vimeo-container'>
|
||||||
|
<iframe width="1030" height="579" src="https://www.youtube.com/embed/pryhI4F_zqQ" title="Aggregation States in ClickHouse" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
|
||||||
|
</div>
|
||||||
|
|
||||||
The engine processes all columns with the following types:
|
The engine processes all columns with the following types:
|
||||||
|
|
||||||
## [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md)
|
## [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md)
|
||||||
|
@ -684,8 +684,7 @@ If you perform the `SELECT` query between merges, you may get expired data. To a
|
|||||||
|
|
||||||
**See Also**
|
**See Also**
|
||||||
|
|
||||||
- [ttl_only_drop_parts](/docs/en/operations/settings/settings.md/#ttl_only_drop_parts) setting
|
- [ttl_only_drop_parts](/docs/en/operations/settings/merge-tree-settings#ttl_only_drop_parts) setting
|
||||||
|
|
||||||
|
|
||||||
## Disk types
|
## Disk types
|
||||||
|
|
||||||
|
@ -16,7 +16,7 @@ You have four options for getting up and running with ClickHouse:
|
|||||||
- **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, - built by, maintained and supported by the creators of ClickHouse
|
- **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, - built by, maintained and supported by the creators of ClickHouse
|
||||||
- **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse
|
- **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse
|
||||||
- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A up), or PowerPC64LE CPU architecture
|
- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A up), or PowerPC64LE CPU architecture
|
||||||
- **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub
|
- **[Docker Image](https://hub.docker.com/_/clickhouse):** use the official Docker image in Docker Hub
|
||||||
|
|
||||||
## ClickHouse Cloud
|
## ClickHouse Cloud
|
||||||
|
|
||||||
|
@ -597,6 +597,30 @@ If number of tables is greater than this value, server will throw an exception.
|
|||||||
<max_table_num_to_throw>400</max_table_num_to_throw>
|
<max_table_num_to_throw>400</max_table_num_to_throw>
|
||||||
```
|
```
|
||||||
|
|
||||||
|
## max\_replicated\_table\_num\_to\_throw {#max-replicated-table-num-to-throw}
|
||||||
|
If number of replicated tables is greater than this value, server will throw an exception. 0 means no limitation. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
```xml
|
||||||
|
<max_replicated_table_num_to_throw>400</max_replicated_table_num_to_throw>
|
||||||
|
```
|
||||||
|
|
||||||
|
## max\_dictionary\_num\_to\_throw {#max-dictionary-num-to-throw}
|
||||||
|
If number of dictionaries is greater than this value, server will throw an exception. 0 means no limitation. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
```xml
|
||||||
|
<max_dictionary_num_to_throw>400</max_dictionary_num_to_throw>
|
||||||
|
```
|
||||||
|
|
||||||
|
## max\_view\_num\_to\_throw {#max-view-num-to-throw}
|
||||||
|
If number of views is greater than this value, server will throw an exception. 0 means no limitation. Only count table in Atomic/Ordinary/Replicated/Lazy database engine.
|
||||||
|
|
||||||
|
**Example**
|
||||||
|
```xml
|
||||||
|
<max_view_num_to_throw>400</max_view_num_to_throw>
|
||||||
|
```
|
||||||
|
|
||||||
## max\_database\_num\_to\_throw {#max-table-num-to-throw}
|
## max\_database\_num\_to\_throw {#max-table-num-to-throw}
|
||||||
If number of _database is greater than this value, server will throw an exception. 0 means no limitation.
|
If number of _database is greater than this value, server will throw an exception. 0 means no limitation.
|
||||||
Default value: 0
|
Default value: 0
|
||||||
@ -1619,6 +1643,7 @@ You can specify the log format that will be outputted in the console log. Curren
|
|||||||
|
|
||||||
```json
|
```json
|
||||||
{
|
{
|
||||||
|
"date_time_utc": "2024-11-06T09:06:09Z",
|
||||||
"date_time": "1650918987.180175",
|
"date_time": "1650918987.180175",
|
||||||
"thread_name": "#1",
|
"thread_name": "#1",
|
||||||
"thread_id": "254545",
|
"thread_id": "254545",
|
||||||
@ -3261,3 +3286,17 @@ Use the legacy MongoDB integration implementation. Deprecated.
|
|||||||
Type: Bool
|
Type: Bool
|
||||||
|
|
||||||
Default value: `true`.
|
Default value: `true`.
|
||||||
|
|
||||||
|
## allowed_feature_tier
|
||||||
|
|
||||||
|
Controls if the user can change settings related to the different feature tiers.
|
||||||
|
0 - Changes to any setting are allowed (experimental, beta, production).
|
||||||
|
1 - Only changes to beta and production feature settings are allowed. Changes to experimental settings are rejected.
|
||||||
|
2 - Only changes to production settings are allowed. Changes to experimental or beta settings are rejected.
|
||||||
|
|
||||||
|
This is equivalent to setting a readonly constraint on all EXPERIMENTAL / BETA features.
|
||||||
|
```
|
||||||
|
|
||||||
|
Type: UInt32
|
||||||
|
|
||||||
|
Default value: `0` (all settings can be changed).
|
||||||
|
@ -78,6 +78,16 @@ If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled.
|
|||||||
|
|
||||||
Default value: `10 * 1024 * 1024 * 1024` bytes.
|
Default value: `10 * 1024 * 1024 * 1024` bytes.
|
||||||
|
|
||||||
|
## ttl_only_drop_parts
|
||||||
|
|
||||||
|
Controls whether data parts are fully dropped in MergeTree tables when all rows in that part have expired according to their `TTL` settings.
|
||||||
|
|
||||||
|
When `ttl_only_drop_parts` is disabled (by default), only the rows that have expired based on their TTL settings are removed.
|
||||||
|
|
||||||
|
When `ttl_only_drop_parts` is enabled, the entire part is dropped if all rows in that part have expired according to their `TTL` settings.
|
||||||
|
|
||||||
|
Default value: 0.
|
||||||
|
|
||||||
## merge_with_ttl_timeout
|
## merge_with_ttl_timeout
|
||||||
|
|
||||||
Minimum delay in seconds before repeating a merge with delete TTL.
|
Minimum delay in seconds before repeating a merge with delete TTL.
|
||||||
@ -1095,3 +1105,13 @@ Possible values:
|
|||||||
Default value: 0.0
|
Default value: 0.0
|
||||||
|
|
||||||
Note that if both `min_free_disk_ratio_to_perform_insert` and `min_free_disk_bytes_to_perform_insert` are specified, ClickHouse will count on the value that will allow to perform inserts on a bigger amount of free memory.
|
Note that if both `min_free_disk_ratio_to_perform_insert` and `min_free_disk_bytes_to_perform_insert` are specified, ClickHouse will count on the value that will allow to perform inserts on a bigger amount of free memory.
|
||||||
|
|
||||||
|
## cache_populated_by_fetch
|
||||||
|
|
||||||
|
A Cloud only setting.
|
||||||
|
|
||||||
|
When `cache_populated_by_fetch` is disabled (the default setting), new data parts are loaded into the cache only when a query is run that requires those parts.
|
||||||
|
|
||||||
|
If enabled, `cache_populated_by_fetch` will instead cause all nodes to load new data parts from storage into their cache without requiring a query to trigger such an action.
|
||||||
|
|
||||||
|
Default value: 0.
|
@ -211,7 +211,7 @@ Number of threads in the server of the replicas communication protocol (without
|
|||||||
|
|
||||||
The difference in time the thread for calculation of the asynchronous metrics was scheduled to wake up and the time it was in fact, woken up. A proxy-indicator of overall system latency and responsiveness.
|
The difference in time the thread for calculation of the asynchronous metrics was scheduled to wake up and the time it was in fact, woken up. A proxy-indicator of overall system latency and responsiveness.
|
||||||
|
|
||||||
### LoadAverage_*N*
|
### LoadAverage*N*
|
||||||
|
|
||||||
The whole system load, averaged with exponential smoothing over 1 minute. The load represents the number of threads across all the processes (the scheduling entities of the OS kernel), that are currently running by CPU or waiting for IO, or ready to run but not being scheduled at this point of time. This number includes all the processes, not only clickhouse-server. The number can be greater than the number of CPU cores, if the system is overloaded, and many processes are ready to run but waiting for CPU or IO.
|
The whole system load, averaged with exponential smoothing over 1 minute. The load represents the number of threads across all the processes (the scheduling entities of the OS kernel), that are currently running by CPU or waiting for IO, or ready to run but not being scheduled at this point of time. This number includes all the processes, not only clickhouse-server. The number can be greater than the number of CPU cores, if the system is overloaded, and many processes are ready to run but waiting for CPU or IO.
|
||||||
|
|
||||||
|
@ -75,7 +75,7 @@ FROM t_null_big
|
|||||||
└────────────────────┴─────────────────────┘
|
└────────────────────┴─────────────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
Also you can use [Tuple](/docs/en/sql-reference/data-types/tuple.md) to work around NULL skipping behavior. The a `Tuple` that contains only a `NULL` value is not `NULL`, so the aggregate functions won't skip that row because of that `NULL` value.
|
Also you can use [Tuple](/docs/en/sql-reference/data-types/tuple.md) to work around NULL skipping behavior. A `Tuple` that contains only a `NULL` value is not `NULL`, so the aggregate functions won't skip that row because of that `NULL` value.
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT
|
SELECT
|
||||||
@ -110,7 +110,7 @@ GROUP BY v
|
|||||||
└──────┴─────────┴──────────┘
|
└──────┴─────────┴──────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
And here is an example of of first_value with `RESPECT NULLS` where we can see that NULL inputs are respected and it will return the first value read, whether it's NULL or not:
|
And here is an example of first_value with `RESPECT NULLS` where we can see that NULL inputs are respected and it will return the first value read, whether it's NULL or not:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
SELECT
|
SELECT
|
||||||
|
@ -5,7 +5,15 @@ sidebar_position: 102
|
|||||||
|
|
||||||
# any
|
# any
|
||||||
|
|
||||||
Selects the first encountered value of a column, ignoring any `NULL` values.
|
Selects the first encountered value of a column.
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
As a query can be executed in arbitrary order, the result of this function is non-deterministic.
|
||||||
|
If you need an arbitrary but deterministic result, use functions [`min`](../reference/min.md) or [`max`](../reference/max.md).
|
||||||
|
:::
|
||||||
|
|
||||||
|
By default, the function never returns NULL, i.e. ignores NULL values in the input column.
|
||||||
|
However, if the function is used with the `RESPECT NULLS` modifier, it returns the first value read, regardless of whether it is NULL or not.
|
||||||
|
|
||||||
**Syntax**
|
**Syntax**
|
||||||
|
|
||||||
@ -13,46 +21,51 @@ Selects the first encountered value of a column, ignoring any `NULL` values.
|
|||||||
any(column) [RESPECT NULLS]
|
any(column) [RESPECT NULLS]
|
||||||
```
|
```
|
||||||
|
|
||||||
Aliases: `any_value`, [`first_value`](../reference/first_value.md).
|
Aliases `any(column)` (without `RESPECT NULLS`)
|
||||||
|
- `any_value`
|
||||||
|
- [`first_value`](../reference/first_value.md).
|
||||||
|
|
||||||
|
Alias for `any(column) RESPECT NULLS`
|
||||||
|
- `anyRespectNulls`, `any_respect_nulls`
|
||||||
|
- `firstValueRespectNulls`, `first_value_respect_nulls`
|
||||||
|
- `anyValueRespectNulls`, `any_value_respect_nulls`
|
||||||
|
|
||||||
**Parameters**
|
**Parameters**
|
||||||
- `column`: The column name.
|
- `column`: The column name.
|
||||||
|
|
||||||
**Returned value**
|
**Returned value**
|
||||||
|
|
||||||
:::note
|
The first value encountered.
|
||||||
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not.
|
|
||||||
:::
|
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
The return type of the function is the same as the input, except for LowCardinality which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) ) to modify this behaviour.
|
The return type of the function is the same as the input, except for LowCardinality which is discarded.
|
||||||
:::
|
This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column).
|
||||||
|
You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) ) to modify this behaviour.
|
||||||
:::warning
|
|
||||||
The query can be executed in any order and even in a different order each time, so the result of this function is indeterminate.
|
|
||||||
To get a determinate result, you can use the [`min`](../reference/min.md) or [`max`](../reference/max.md) function instead of `any`.
|
|
||||||
:::
|
:::
|
||||||
|
|
||||||
**Implementation details**
|
**Implementation details**
|
||||||
|
|
||||||
In some cases, you can rely on the order of execution. This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.
|
In some cases, you can rely on the order of execution.
|
||||||
|
This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.
|
||||||
|
|
||||||
When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions. To get behavior like in MySQL, you can put the other columns in the `any` aggregate function.
|
When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions.
|
||||||
|
In other words, each column selected from the table must be used either in keys or inside aggregate functions.
|
||||||
|
To get behavior like in MySQL, you can put the other columns in the `any` aggregate function.
|
||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
Query:
|
Query:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;
|
CREATE TABLE tab (city Nullable(String)) ENGINE=Memory;
|
||||||
|
|
||||||
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
|
INSERT INTO tab (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
|
||||||
|
|
||||||
SELECT any(city) FROM any_nulls;
|
SELECT any(city), anyRespectNulls(city) FROM tab;
|
||||||
```
|
```
|
||||||
|
|
||||||
```response
|
```response
|
||||||
┌─any(city)─┐
|
┌─any(city)─┬─anyRespectNulls(city)─┐
|
||||||
│ Amsterdam │
|
│ Amsterdam │ ᴺᵁᴸᴸ │
|
||||||
└───────────┘
|
└───────────┴───────────────────────┘
|
||||||
```
|
```
|
||||||
|
@ -5,7 +5,15 @@ sidebar_position: 105
|
|||||||
|
|
||||||
# anyLast
|
# anyLast
|
||||||
|
|
||||||
Selects the last value encountered, ignoring any `NULL` values by default. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
|
Selects the last encountered value of a column.
|
||||||
|
|
||||||
|
:::warning
|
||||||
|
As a query can be executed in arbitrary order, the result of this function is non-deterministic.
|
||||||
|
If you need an arbitrary but deterministic result, use functions [`min`](../reference/min.md) or [`max`](../reference/max.md).
|
||||||
|
:::
|
||||||
|
|
||||||
|
By default, the function never returns NULL, i.e. ignores NULL values in the input column.
|
||||||
|
However, if the function is used with the `RESPECT NULLS` modifier, it returns the last value read, regardless of whether it is NULL or not.
|
||||||
|
|
||||||
**Syntax**
|
**Syntax**
|
||||||
|
|
||||||
@ -13,12 +21,15 @@ Selects the last value encountered, ignoring any `NULL` values by default. The r
|
|||||||
anyLast(column) [RESPECT NULLS]
|
anyLast(column) [RESPECT NULLS]
|
||||||
```
|
```
|
||||||
|
|
||||||
**Parameters**
|
Alias `anyLast(column)` (without `RESPECT NULLS`)
|
||||||
- `column`: The column name.
|
- [`last_value`](../reference/last_value.md).
|
||||||
|
|
||||||
:::note
|
Aliases for `anyLast(column) RESPECT NULLS`
|
||||||
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the last value passed, regardless of whether it is `NULL` or not.
|
- `anyLastRespectNulls`, `anyLast_respect_nulls`
|
||||||
:::
|
- `lastValueRespectNulls`, `last_value_respect_nulls`
|
||||||
|
|
||||||
|
**Parameters**
|
||||||
|
- `column`: The column name.
|
||||||
|
|
||||||
**Returned value**
|
**Returned value**
|
||||||
|
|
||||||
@ -29,15 +40,15 @@ Supports the `RESPECT NULLS` modifier after the function name. Using this modifi
|
|||||||
Query:
|
Query:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;
|
CREATE TABLE tab (city Nullable(String)) ENGINE=Memory;
|
||||||
|
|
||||||
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
|
INSERT INTO tab (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
|
||||||
|
|
||||||
SELECT anyLast(city) FROM any_last_nulls;
|
SELECT anyLast(city), anyLastRespectNulls(city) FROM tab;
|
||||||
```
|
```
|
||||||
|
|
||||||
```response
|
```response
|
||||||
┌─anyLast(city)─┐
|
┌─anyLast(city)─┬─anyLastRespectNulls(city)─┐
|
||||||
│ Valencia │
|
│ Valencia │ ᴺᵁᴸᴸ │
|
||||||
└───────────────┘
|
└───────────────┴───────────────────────────┘
|
||||||
```
|
```
|
||||||
|
@ -1,10 +1,10 @@
|
|||||||
---
|
---
|
||||||
slug: /en/sql-reference/data-types/float
|
slug: /en/sql-reference/data-types/float
|
||||||
sidebar_position: 4
|
sidebar_position: 4
|
||||||
sidebar_label: Float32, Float64
|
sidebar_label: Float32, Float64, BFloat16
|
||||||
---
|
---
|
||||||
|
|
||||||
# Float32, Float64
|
# Float32, Float64, BFloat16
|
||||||
|
|
||||||
:::note
|
:::note
|
||||||
If you need accurate calculations, in particular if you work with financial or business data requiring a high precision, you should consider using [Decimal](../data-types/decimal.md) instead.
|
If you need accurate calculations, in particular if you work with financial or business data requiring a high precision, you should consider using [Decimal](../data-types/decimal.md) instead.
|
||||||
@ -117,3 +117,11 @@ SELECT 0 / 0
|
|||||||
```
|
```
|
||||||
|
|
||||||
See the rules for `NaN` sorting in the section [ORDER BY clause](../../sql-reference/statements/select/order-by.md).
|
See the rules for `NaN` sorting in the section [ORDER BY clause](../../sql-reference/statements/select/order-by.md).
|
||||||
|
|
||||||
|
## BFloat16
|
||||||
|
|
||||||
|
`BFloat16` is a 16-bit floating point data type with 8-bit exponent, sign, and 7-bit mantissa.
|
||||||
|
|
||||||
|
It is useful for machine learning and AI applications.
|
||||||
|
|
||||||
|
ClickHouse supports conversions between `Float32` and `BFloat16`. Most of other operations are not supported.
|
||||||
|
@ -4489,9 +4489,9 @@ Using replacement fields, you can define a pattern for the resulting string.
|
|||||||
| k | clockhour of day (1~24) | number | 24 |
|
| k | clockhour of day (1~24) | number | 24 |
|
||||||
| m | minute of hour | number | 30 |
|
| m | minute of hour | number | 30 |
|
||||||
| s | second of minute | number | 55 |
|
| s | second of minute | number | 55 |
|
||||||
| S | fraction of second (not supported yet) | number | 978 |
|
| S | fraction of second | number | 978 |
|
||||||
| z | time zone (short name not supported yet) | text | Pacific Standard Time; PST |
|
| z | time zone | text | Eastern Standard Time; EST |
|
||||||
| Z | time zone offset/id (not supported yet) | zone | -0800; -08:00; America/Los_Angeles |
|
| Z | time zone offset | zone | -0800; -0812 |
|
||||||
| ' | escape for text | delimiter | |
|
| ' | escape for text | delimiter | |
|
||||||
| '' | single quote | literal | ' |
|
| '' | single quote | literal | ' |
|
||||||
|
|
||||||
|
@ -6791,7 +6791,7 @@ parseDateTime(str[, format[, timezone]])
|
|||||||
|
|
||||||
**Returned value(s)**
|
**Returned value(s)**
|
||||||
|
|
||||||
Returns DateTime values parsed from input string according to a MySQL style format string.
|
Return a [DateTime](../data-types/datetime.md) value parsed from the input string according to a MySQL-style format string.
|
||||||
|
|
||||||
**Supported format specifiers**
|
**Supported format specifiers**
|
||||||
|
|
||||||
@ -6840,7 +6840,7 @@ parseDateTimeInJodaSyntax(str[, format[, timezone]])
|
|||||||
|
|
||||||
**Returned value(s)**
|
**Returned value(s)**
|
||||||
|
|
||||||
Returns DateTime values parsed from input string according to a Joda style format.
|
Return a [DateTime](../data-types/datetime.md) value parsed from the input string according to a Joda-style format string.
|
||||||
|
|
||||||
**Supported format specifiers**
|
**Supported format specifiers**
|
||||||
|
|
||||||
@ -6867,9 +6867,55 @@ Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that
|
|||||||
|
|
||||||
Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that it returns `NULL` when it encounters a date format that cannot be processed.
|
Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that it returns `NULL` when it encounters a date format that cannot be processed.
|
||||||
|
|
||||||
|
## parseDateTime64
|
||||||
|
|
||||||
|
Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [MySQL format string](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
parseDateTime64(str[, format[, timezone]])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `str` — The String to be parsed.
|
||||||
|
- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s.%f` if not specified.
|
||||||
|
- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional.
|
||||||
|
|
||||||
|
**Returned value(s)**
|
||||||
|
|
||||||
|
Return a [DateTime64](../data-types/datetime64.md) value parsed from the input string according to a MySQL-style format string.
|
||||||
|
The precision of the returned value is 6.
|
||||||
|
|
||||||
|
## parseDateTime64OrZero
|
||||||
|
|
||||||
|
Same as for [parseDateTime64](#parsedatetime64) except that it returns zero date when it encounters a date format that cannot be processed.
|
||||||
|
|
||||||
|
## parseDateTime64OrNull
|
||||||
|
|
||||||
|
Same as for [parseDateTime64](#parsedatetime64) except that it returns `NULL` when it encounters a date format that cannot be processed.
|
||||||
|
|
||||||
## parseDateTime64InJodaSyntax
|
## parseDateTime64InJodaSyntax
|
||||||
|
|
||||||
Similar to [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax). Differently, it returns a value of type [DateTime64](../data-types/datetime64.md).
|
Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [Joda format string](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html).
|
||||||
|
|
||||||
|
**Syntax**
|
||||||
|
|
||||||
|
``` sql
|
||||||
|
parseDateTime64InJodaSyntax(str[, format[, timezone]])
|
||||||
|
```
|
||||||
|
|
||||||
|
**Arguments**
|
||||||
|
|
||||||
|
- `str` — The String to be parsed.
|
||||||
|
- `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified.
|
||||||
|
- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional.
|
||||||
|
|
||||||
|
**Returned value(s)**
|
||||||
|
|
||||||
|
Return a [DateTime64](../data-types/datetime64.md) value parsed from the input string according to a Joda-style format string.
|
||||||
|
The precision of the returned value equal to the number of `S` placeholders in the format string (but at most 6).
|
||||||
|
|
||||||
## parseDateTime64InJodaSyntaxOrZero
|
## parseDateTime64InJodaSyntaxOrZero
|
||||||
|
|
||||||
|
46
docs/en/sql-reference/statements/check-grant.md
Normal file
@ -0,0 +1,46 @@
|
|||||||
|
---
|
||||||
|
slug: /en/sql-reference/statements/check-grant
|
||||||
|
sidebar_position: 56
|
||||||
|
sidebar_label: CHECK GRANT
|
||||||
|
title: "CHECK GRANT Statement"
|
||||||
|
---
|
||||||
|
|
||||||
|
The `CHECK GRANT` query is used to check whether the current user/role has been granted a specific privilege.
|
||||||
|
|
||||||
|
## Syntax
|
||||||
|
|
||||||
|
The basic syntax of the query is as follows:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CHECK GRANT privilege[(column_name [,...])] [,...] ON {db.table[*]|db[*].*|*.*|table[*]|*}
|
||||||
|
```
|
||||||
|
|
||||||
|
- `privilege` — Type of privilege.
|
||||||
|
|
||||||
|
## Examples
|
||||||
|
|
||||||
|
If the user has been granted the privilege, the response `check_grant` will be `1`. Otherwise, the response `check_grant` will be `0`.
|
||||||
|
|
||||||
|
If `table_1.col1` exists and the current user has been granted the privilege `SELECT`/`SELECT(col1)`, or a role with this privilege, the response is `1`.
|
||||||
|
```sql
|
||||||
|
CHECK GRANT SELECT(col1) ON table_1;
|
||||||
|
```
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─result─┐
|
||||||
|
│ 1 │
|
||||||
|
└────────┘
|
||||||
|
```
|
||||||
|
If `table_2.col2` doesn't exist, or the current user has not been granted the privilege `SELECT`/`SELECT(col2)`, or a role with this privilege, the response is `0`.
|
||||||
|
```sql
|
||||||
|
CHECK GRANT SELECT(col2) ON table_2;
|
||||||
|
```
|
||||||
|
|
||||||
|
```text
|
||||||
|
┌─result─┐
|
||||||
|
│ 0 │
|
||||||
|
└────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## Wildcard
|
||||||
|
When specifying privileges, you can use an asterisk (`*`) instead of a table or a database name. Please check [WILDCARD GRANTS](../../sql-reference/statements/grant.md#wildcard-grants) for wildcard rules.
|
@ -161,6 +161,8 @@ Settings:
|
|||||||
- `actions` — Prints detailed information about step actions. Default: 0.
|
- `actions` — Prints detailed information about step actions. Default: 0.
|
||||||
- `json` — Prints query plan steps as a row in [JSON](../../interfaces/formats.md#json) format. Default: 0. It is recommended to use [TSVRaw](../../interfaces/formats.md#tabseparatedraw) format to avoid unnecessary escaping.
|
- `json` — Prints query plan steps as a row in [JSON](../../interfaces/formats.md#json) format. Default: 0. It is recommended to use [TSVRaw](../../interfaces/formats.md#tabseparatedraw) format to avoid unnecessary escaping.
|
||||||
|
|
||||||
|
When `json=1` step names will contain an additional suffix with unique step identifier.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
@ -194,30 +196,25 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw;
|
|||||||
{
|
{
|
||||||
"Plan": {
|
"Plan": {
|
||||||
"Node Type": "Union",
|
"Node Type": "Union",
|
||||||
|
"Node Id": "Union_10",
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "Expression",
|
"Node Type": "Expression",
|
||||||
|
"Node Id": "Expression_13",
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "SettingQuotaAndLimits",
|
"Node Type": "ReadFromStorage",
|
||||||
"Plans": [
|
"Node Id": "ReadFromStorage_0"
|
||||||
{
|
|
||||||
"Node Type": "ReadFromStorage"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"Node Type": "Expression",
|
"Node Type": "Expression",
|
||||||
|
"Node Id": "Expression_16",
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "SettingQuotaAndLimits",
|
"Node Type": "ReadFromStorage",
|
||||||
"Plans": [
|
"Node Id": "ReadFromStorage_4"
|
||||||
{
|
|
||||||
"Node Type": "ReadFromStorage"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@ -249,6 +246,7 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
|
|||||||
{
|
{
|
||||||
"Plan": {
|
"Plan": {
|
||||||
"Node Type": "Expression",
|
"Node Type": "Expression",
|
||||||
|
"Node Id": "Expression_5",
|
||||||
"Header": [
|
"Header": [
|
||||||
{
|
{
|
||||||
"Name": "1",
|
"Name": "1",
|
||||||
@ -261,23 +259,13 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
|
|||||||
],
|
],
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "SettingQuotaAndLimits",
|
"Node Type": "ReadFromStorage",
|
||||||
|
"Node Id": "ReadFromStorage_0",
|
||||||
"Header": [
|
"Header": [
|
||||||
{
|
{
|
||||||
"Name": "dummy",
|
"Name": "dummy",
|
||||||
"Type": "UInt8"
|
"Type": "UInt8"
|
||||||
}
|
}
|
||||||
],
|
|
||||||
"Plans": [
|
|
||||||
{
|
|
||||||
"Node Type": "ReadFromStorage",
|
|
||||||
"Header": [
|
|
||||||
{
|
|
||||||
"Name": "dummy",
|
|
||||||
"Type": "UInt8"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
@ -351,17 +339,31 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
|
|||||||
{
|
{
|
||||||
"Plan": {
|
"Plan": {
|
||||||
"Node Type": "Expression",
|
"Node Type": "Expression",
|
||||||
|
"Node Id": "Expression_5",
|
||||||
"Expression": {
|
"Expression": {
|
||||||
"Inputs": [],
|
"Inputs": [
|
||||||
|
{
|
||||||
|
"Name": "dummy",
|
||||||
|
"Type": "UInt8"
|
||||||
|
}
|
||||||
|
],
|
||||||
"Actions": [
|
"Actions": [
|
||||||
{
|
{
|
||||||
"Node Type": "Column",
|
"Node Type": "INPUT",
|
||||||
"Result Type": "UInt8",
|
"Result Type": "UInt8",
|
||||||
"Result Type": "Column",
|
"Result Name": "dummy",
|
||||||
|
"Arguments": [0],
|
||||||
|
"Removed Arguments": [0],
|
||||||
|
"Result": 0
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"Node Type": "COLUMN",
|
||||||
|
"Result Type": "UInt8",
|
||||||
|
"Result Name": "1",
|
||||||
"Column": "Const(UInt8)",
|
"Column": "Const(UInt8)",
|
||||||
"Arguments": [],
|
"Arguments": [],
|
||||||
"Removed Arguments": [],
|
"Removed Arguments": [],
|
||||||
"Result": 0
|
"Result": 1
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"Outputs": [
|
"Outputs": [
|
||||||
@ -370,17 +372,12 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
|
|||||||
"Type": "UInt8"
|
"Type": "UInt8"
|
||||||
}
|
}
|
||||||
],
|
],
|
||||||
"Positions": [0],
|
"Positions": [1]
|
||||||
"Project Input": true
|
|
||||||
},
|
},
|
||||||
"Plans": [
|
"Plans": [
|
||||||
{
|
{
|
||||||
"Node Type": "SettingQuotaAndLimits",
|
"Node Type": "ReadFromStorage",
|
||||||
"Plans": [
|
"Node Id": "ReadFromStorage_0"
|
||||||
{
|
|
||||||
"Node Type": "ReadFromStorage"
|
|
||||||
}
|
|
||||||
]
|
|
||||||
}
|
}
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@ -396,6 +393,8 @@ Settings:
|
|||||||
- `graph` — Prints a graph described in the [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) graph description language. Default: 0.
|
- `graph` — Prints a graph described in the [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) graph description language. Default: 0.
|
||||||
- `compact` — Prints graph in compact mode if `graph` setting is enabled. Default: 1.
|
- `compact` — Prints graph in compact mode if `graph` setting is enabled. Default: 1.
|
||||||
|
|
||||||
|
When `compact=0` and `graph=1` processor names will contain an additional suffix with unique processor identifier.
|
||||||
|
|
||||||
Example:
|
Example:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
|
@ -5,9 +5,14 @@ sidebar_label: EXCEPT
|
|||||||
|
|
||||||
# EXCEPT Clause
|
# EXCEPT Clause
|
||||||
|
|
||||||
The `EXCEPT` clause returns only those rows that result from the first query without the second. The queries must match the number of columns, order, and type. The result of `EXCEPT` can contain duplicate rows.
|
The `EXCEPT` clause returns only those rows that result from the first query without the second.
|
||||||
|
|
||||||
Multiple `EXCEPT` statements are executed left to right if parenthesis are not specified. The `EXCEPT` operator has the same priority as the `UNION` clause and lower priority than the `INTERSECT` clause.
|
- Both queries must have the same number of columns in the same order and data type.
|
||||||
|
- The result of `EXCEPT` can contain duplicate rows. Use `EXCEPT DISTINCT` if this is not desirable.
|
||||||
|
- Multiple `EXCEPT` statements are executed from left to right if parentheses are not specified.
|
||||||
|
- The `EXCEPT` operator has the same priority as the `UNION` clause and lower priority than the `INTERSECT` clause.
|
||||||
|
|
||||||
|
## Syntax
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT column1 [, column2 ]
|
SELECT column1 [, column2 ]
|
||||||
@ -19,18 +24,33 @@ EXCEPT
|
|||||||
SELECT column1 [, column2 ]
|
SELECT column1 [, column2 ]
|
||||||
FROM table2
|
FROM table2
|
||||||
[WHERE condition]
|
[WHERE condition]
|
||||||
|
|
||||||
```
|
```
|
||||||
The condition could be any expression based on your requirements.
|
The condition could be any expression based on your requirements.
|
||||||
|
|
||||||
|
Additionally, `EXCEPT()` can be used to exclude columns from a result in the same table, as is possible with BigQuery (Google Cloud), using the following syntax:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT column1 [, column2 ] EXCEPT (column3 [, column4])
|
||||||
|
FROM table1
|
||||||
|
[WHERE condition]
|
||||||
|
```
|
||||||
|
|
||||||
## Examples
|
## Examples
|
||||||
|
|
||||||
|
The examples in this section demonstrate usage of the `EXCEPT` clause.
|
||||||
|
|
||||||
|
### Filtering Numbers Using the `EXCEPT` Clause
|
||||||
|
|
||||||
Here is a simple example that returns the numbers 1 to 10 that are _not_ a part of the numbers 3 to 8:
|
Here is a simple example that returns the numbers 1 to 10 that are _not_ a part of the numbers 3 to 8:
|
||||||
|
|
||||||
Query:
|
Query:
|
||||||
|
|
||||||
``` sql
|
``` sql
|
||||||
SELECT number FROM numbers(1,10) EXCEPT SELECT number FROM numbers(3,6);
|
SELECT number
|
||||||
|
FROM numbers(1, 10)
|
||||||
|
EXCEPT
|
||||||
|
SELECT number
|
||||||
|
FROM numbers(3, 6)
|
||||||
```
|
```
|
||||||
|
|
||||||
Result:
|
Result:
|
||||||
@ -44,7 +64,53 @@ Result:
|
|||||||
└────────┘
|
└────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
`EXCEPT` and `INTERSECT` can often be used interchangeably with different Boolean logic, and they are both useful if you have two tables that share a common column (or columns). For example, suppose we have a few million rows of historical cryptocurrency data that contains trade prices and volume:
|
### Excluding Specific Columns Using `EXCEPT()`
|
||||||
|
|
||||||
|
`EXCEPT()` can be used to quickly exclude columns from a result. For instance, if we want to select all columns from a table except a few, as shown in the example below:
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW COLUMNS IN system.settings
|
||||||
|
|
||||||
|
SELECT * EXCEPT (default, alias_for, readonly, description)
|
||||||
|
FROM system.settings
|
||||||
|
LIMIT 5
|
||||||
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
|
```response
|
||||||
|
┌─field───────┬─type─────────────────────────────────────────────────────────────────────┬─null─┬─key─┬─default─┬─extra─┐
|
||||||
|
1. │ alias_for │ String │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
2. │ changed │ UInt8 │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
3. │ default │ String │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
4. │ description │ String │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
5. │ is_obsolete │ UInt8 │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
6. │ max │ Nullable(String) │ YES │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
7. │ min │ Nullable(String) │ YES │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
8. │ name │ String │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
9. │ readonly │ UInt8 │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
10. │ tier │ Enum8('Production' = 0, 'Obsolete' = 4, 'Experimental' = 8, 'Beta' = 12) │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
11. │ type │ String │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
12. │ value │ String │ NO │ │ ᴺᵁᴸᴸ │ │
|
||||||
|
└─────────────┴──────────────────────────────────────────────────────────────────────────┴──────┴─────┴─────────┴───────┘
|
||||||
|
|
||||||
|
┌─name────────────────────┬─value──────┬─changed─┬─min──┬─max──┬─type────┬─is_obsolete─┬─tier───────┐
|
||||||
|
1. │ dialect │ clickhouse │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Dialect │ 0 │ Production │
|
||||||
|
2. │ min_compress_block_size │ 65536 │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ UInt64 │ 0 │ Production │
|
||||||
|
3. │ max_compress_block_size │ 1048576 │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ UInt64 │ 0 │ Production │
|
||||||
|
4. │ max_block_size │ 65409 │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ UInt64 │ 0 │ Production │
|
||||||
|
5. │ max_insert_block_size │ 1048449 │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ UInt64 │ 0 │ Production │
|
||||||
|
└─────────────────────────┴────────────┴─────────┴──────┴──────┴─────────┴─────────────┴────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### Using `EXCEPT` and `INTERSECT` with Cryptocurrency Data
|
||||||
|
|
||||||
|
`EXCEPT` and `INTERSECT` can often be used interchangeably with different Boolean logic, and they are both useful if you have two tables that share a common column (or columns).
|
||||||
|
For example, suppose we have a few million rows of historical cryptocurrency data that contains trade prices and volume:
|
||||||
|
|
||||||
|
Query:
|
||||||
|
|
||||||
```sql
|
```sql
|
||||||
CREATE TABLE crypto_prices
|
CREATE TABLE crypto_prices
|
||||||
@ -72,6 +138,8 @@ ORDER BY trade_date DESC
|
|||||||
LIMIT 10;
|
LIMIT 10;
|
||||||
```
|
```
|
||||||
|
|
||||||
|
Result:
|
||||||
|
|
||||||
```response
|
```response
|
||||||
┌─trade_date─┬─crypto_name─┬──────volume─┬────price─┬───market_cap─┬──change_1_day─┐
|
┌─trade_date─┬─crypto_name─┬──────volume─┬────price─┬───market_cap─┬──change_1_day─┐
|
||||||
│ 2020-11-02 │ Bitcoin │ 30771456000 │ 13550.49 │ 251119860000 │ -0.013585099 │
|
│ 2020-11-02 │ Bitcoin │ 30771456000 │ 13550.49 │ 251119860000 │ -0.013585099 │
|
||||||
@ -127,7 +195,7 @@ Result:
|
|||||||
|
|
||||||
This means of the four cryptocurrencies we own, only Bitcoin has never dropped below $10 (based on the limited data we have here in this example).
|
This means of the four cryptocurrencies we own, only Bitcoin has never dropped below $10 (based on the limited data we have here in this example).
|
||||||
|
|
||||||
## EXCEPT DISTINCT
|
### Using `EXCEPT DISTINCT`
|
||||||
|
|
||||||
Notice in the previous query we had multiple Bitcoin holdings in the result. You can add `DISTINCT` to `EXCEPT` to eliminate duplicate rows from the result:
|
Notice in the previous query we had multiple Bitcoin holdings in the result. You can add `DISTINCT` to `EXCEPT` to eliminate duplicate rows from the result:
|
||||||
|
|
||||||
@ -146,7 +214,6 @@ Result:
|
|||||||
└─────────────┘
|
└─────────────┘
|
||||||
```
|
```
|
||||||
|
|
||||||
|
|
||||||
**See Also**
|
**See Also**
|
||||||
|
|
||||||
- [UNION](union.md#union-clause)
|
- [UNION](union.md#union-clause)
|
||||||
|
@ -15,7 +15,7 @@ first_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
|
|||||||
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
|
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
|
||||||
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
|
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
|
||||||
FROM table_name
|
FROM table_name
|
||||||
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
|
WINDOW window_name as ([PARTITION BY grouping_column] [ORDER BY sorting_column])
|
||||||
```
|
```
|
||||||
|
|
||||||
Alias: `any`.
|
Alias: `any`.
|
||||||
@ -23,6 +23,8 @@ Alias: `any`.
|
|||||||
:::note
|
:::note
|
||||||
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
|
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
|
||||||
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
|
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
|
||||||
|
|
||||||
|
Alias: `firstValueRespectNulls`
|
||||||
:::
|
:::
|
||||||
|
|
||||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||||
@ -48,7 +50,7 @@ CREATE TABLE salaries
|
|||||||
)
|
)
|
||||||
Engine = Memory;
|
Engine = Memory;
|
||||||
|
|
||||||
INSERT INTO salaries FORMAT Values
|
INSERT INTO salaries FORMAT VALUES
|
||||||
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
|
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
|
||||||
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
||||||
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
|
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
|
||||||
|
@ -23,6 +23,8 @@ Alias: `anyLast`.
|
|||||||
:::note
|
:::note
|
||||||
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
|
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
|
||||||
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
|
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
|
||||||
|
|
||||||
|
Alias: `lastValueRespectNulls`
|
||||||
:::
|
:::
|
||||||
|
|
||||||
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
|
||||||
@ -33,7 +35,7 @@ For more detail on window function syntax see: [Window Functions - Syntax](./ind
|
|||||||
|
|
||||||
**Example**
|
**Example**
|
||||||
|
|
||||||
In this example the `last_value` function is used to find the highest paid footballer from a fictional dataset of salaries of Premier League football players.
|
In this example the `last_value` function is used to find the lowest paid footballer from a fictional dataset of salaries of Premier League football players.
|
||||||
|
|
||||||
Query:
|
Query:
|
||||||
|
|
||||||
@ -48,7 +50,7 @@ CREATE TABLE salaries
|
|||||||
)
|
)
|
||||||
Engine = Memory;
|
Engine = Memory;
|
||||||
|
|
||||||
INSERT INTO salaries FORMAT Values
|
INSERT INTO salaries FORMAT VALUES
|
||||||
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
|
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
|
||||||
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
|
||||||
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
|
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
|
||||||
|
8
docs/ja/_placeholders/api/_invitations-api-reference.md
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
---
|
||||||
|
sidebar_label: 招待
|
||||||
|
title: 招待
|
||||||
|
---
|
||||||
|
|
||||||
|
## すべての招待を一覧表示
|
||||||
|
|
||||||
|
このファイルは、ビルドプロセス中に `clickhouseapi.js` によって生成されます。内容を変更する必要がある場合は、`clickhouseapi.js` を編集してください。
|
9
docs/ja/_placeholders/api/_keys-api-reference.md
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
---
|
||||||
|
sidebar_label: キー
|
||||||
|
title: キー
|
||||||
|
---
|
||||||
|
|
||||||
|
## すべてのキーのリストを取得する
|
||||||
|
|
||||||
|
このファイルは、ビルドプロセス中に `clickhouseapi.js` によって生成されます。
|
||||||
|
内容を変更する必要がある場合は、`clickhouseapi.js` を編集してください。
|
8
docs/ja/_placeholders/api/_members-api-reference.md
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
---
|
||||||
|
sidebar_label: メンバー
|
||||||
|
title: メンバー
|
||||||
|
---
|
||||||
|
|
||||||
|
## 組織メンバーの一覧
|
||||||
|
|
||||||
|
このファイルはビルドプロセス中に`clickhouseapi.js`によって生成されます。内容を変更する必要がある場合は、`clickhouseapi.js`を編集してください。
|
@ -0,0 +1,8 @@
|
|||||||
|
---
|
||||||
|
sidebar_label: 組織
|
||||||
|
title: 組織
|
||||||
|
---
|
||||||
|
|
||||||
|
## 組織の詳細を取得する
|
||||||
|
|
||||||
|
このファイルはビルドプロセス中に `clickhouseapi.js` によって生成されます。内容を変更する必要がある場合は、`clickhouseapi.js` を編集してください。
|
8
docs/ja/_placeholders/api/_services-api-reference.md
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
---
|
||||||
|
sidebar_label: サービス
|
||||||
|
title: サービス
|
||||||
|
---
|
||||||
|
|
||||||
|
## 組織サービスの一覧
|
||||||
|
|
||||||
|
このファイルは、ビルドプロセス中に `clickhouseapi.js` によって生成されます。内容を変更する必要がある場合は、`clickhouseapi.js` を編集してください。
|
8
docs/ja/_placeholders/changelog/_index.md
Normal file
@ -0,0 +1,8 @@
|
|||||||
|
---
|
||||||
|
slug: /ja/whats-new/changelog/
|
||||||
|
sidebar_position: 2
|
||||||
|
sidebar_label: 2024
|
||||||
|
title: 2024 Changelog
|
||||||
|
note: このファイルは `yarn new-build` によって自動生成されます。
|
||||||
|
---
|
||||||
|
|
41
docs/ja/_snippets/_GCS_authentication_and_bucket.md
Normal file
@ -0,0 +1,41 @@
|
|||||||
|
<details><summary>GCS バケットと HMAC キーを作成する</summary>
|
||||||
|
|
||||||
|
### ch_bucket_us_east1
|
||||||
|
|
||||||
|
![バケットを追加](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-bucket-1.png)
|
||||||
|
|
||||||
|
### ch_bucket_us_east4
|
||||||
|
|
||||||
|
![バケットを追加](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-bucket-2.png)
|
||||||
|
|
||||||
|
### アクセスキーを生成する
|
||||||
|
|
||||||
|
### サービスアカウントの HMAC キーとシークレットを作成する
|
||||||
|
|
||||||
|
**Cloud Storage > Settings > Interoperability** を開き、既存の **Access key** を選択するか、**CREATE A KEY FOR A SERVICE ACCOUNT** を選択します。このガイドでは、新しいサービスアカウントの新しいキーを作成する手順を説明します。
|
||||||
|
|
||||||
|
![バケットを追加](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-a-service-account-key.png)
|
||||||
|
|
||||||
|
### 新しいサービスアカウントを追加する
|
||||||
|
|
||||||
|
すでにサービスアカウントが存在しないプロジェクトの場合は、**CREATE NEW ACCOUNT** をクリックします。
|
||||||
|
|
||||||
|
![バケットを追加](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-service-account-0.png)
|
||||||
|
|
||||||
|
サービスアカウントを作成するには3つのステップがあります。最初のステップでは、アカウントに意味のある名前、ID、説明を付けます。
|
||||||
|
|
||||||
|
![バケットを追加](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-service-account-a.png)
|
||||||
|
|
||||||
|
Interoperability 設定ダイアログでは、IAM ロールとして **Storage Object Admin** ロールが推奨されます。ステップ2でそのロールを選択します。
|
||||||
|
|
||||||
|
![バケットを追加](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-service-account-2.png)
|
||||||
|
|
||||||
|
ステップ3はオプションであり、このガイドでは使用しません。ポリシーに基づいて、ユーザーにこれらの特権を与えることができます。
|
||||||
|
|
||||||
|
![バケットを追加](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-service-account-3.png)
|
||||||
|
|
||||||
|
サービスアカウントの HMAC キーが表示されます。この情報を保存してください。ClickHouse の設定で使用します。
|
||||||
|
|
||||||
|
![バケットを追加](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-guide-key.png)
|
||||||
|
|
||||||
|
</details>
|
132
docs/ja/_snippets/_S3_authentication_and_bucket.md
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
<details><summary>S3バケットとIAMユーザーの作成</summary>
|
||||||
|
|
||||||
|
この記事では、AWS IAMユーザーを設定し、S3バケットを作成し、ClickHouseをそのバケットをS3ディスクとして使用するように設定する基本を説明しています。使用する権限を決定するためにセキュリティチームと協力し、これらを出発点として考えてください。
|
||||||
|
|
||||||
|
### AWS IAMユーザーの作成
|
||||||
|
この手順では、ログインユーザーではなくサービスアカウントユーザーを作成します。
|
||||||
|
1. AWS IAM 管理コンソールにログインします。
|
||||||
|
|
||||||
|
2. 「ユーザー」で、**ユーザーを追加** を選択します。
|
||||||
|
|
||||||
|
![create_iam_user_0](@site/docs/ja/_snippets/images/s3/s3-1.png)
|
||||||
|
|
||||||
|
3. ユーザー名を入力し、資格情報の種類を **アクセスキー - プログラムによるアクセス** に設定し、**次: 権限** を選択します。
|
||||||
|
|
||||||
|
![create_iam_user_1](@site/docs/ja/_snippets/images/s3/s3-2.png)
|
||||||
|
|
||||||
|
4. ユーザーをグループに追加せず、**次: タグ** を選択します。
|
||||||
|
|
||||||
|
![create_iam_user_2](@site/docs/ja/_snippets/images/s3/s3-3.png)
|
||||||
|
|
||||||
|
5. タグを追加する必要がなければ、**次: 確認** を選択します。
|
||||||
|
|
||||||
|
![create_iam_user_3](@site/docs/ja/_snippets/images/s3/s3-4.png)
|
||||||
|
|
||||||
|
6. **ユーザーを作成** を選択します。
|
||||||
|
|
||||||
|
:::note
|
||||||
|
ユーザーに権限がないという警告メッセージは無視できます。次のセクションでバケットに対してユーザーに権限が付与されます。
|
||||||
|
:::
|
||||||
|
|
||||||
|
![create_iam_user_4](@site/docs/ja/_snippets/images/s3/s3-5.png)
|
||||||
|
|
||||||
|
7. ユーザーが作成されました。**表示** をクリックし、アクセスキーとシークレットキーをコピーします。
|
||||||
|
:::note
|
||||||
|
これがシークレットアクセスキーが利用可能な唯一のタイミングですので、キーを別の場所に保存してください。
|
||||||
|
:::
|
||||||
|
|
||||||
|
![create_iam_user_5](@site/docs/ja/_snippets/images/s3/s3-6.png)
|
||||||
|
|
||||||
|
8. 閉じるをクリックし、ユーザー画面でそのユーザーを見つけます。
|
||||||
|
|
||||||
|
![create_iam_user_6](@site/docs/ja/_snippets/images/s3/s3-7.png)
|
||||||
|
|
||||||
|
9. ARN(Amazon Resource Name)をコピーし、バケットのアクセスポリシーを設定する際に使用するために保存します。
|
||||||
|
|
||||||
|
![create_iam_user_7](@site/docs/ja/_snippets/images/s3/s3-8.png)
|
||||||
|
|
||||||
|
### S3バケットの作成
|
||||||
|
1. S3バケットセクションで、**バケットの作成** を選択します。
|
||||||
|
|
||||||
|
![create_s3_bucket_0](@site/docs/ja/_snippets/images/s3/s3-9.png)
|
||||||
|
|
||||||
|
2. バケット名を入力し、他のオプションはデフォルトのままにします。
|
||||||
|
:::note
|
||||||
|
バケット名はAWS全体で一意である必要があります。組織内だけでなく、一意でない場合はエラーが発生します。
|
||||||
|
:::
|
||||||
|
3. `すべてのパブリックアクセスをブロック` を有効のままにします。パブリックアクセスは必要ありません。
|
||||||
|
|
||||||
|
![create_s3_bucket_2](@site/docs/ja/_snippets/images/s3/s3-a.png)
|
||||||
|
|
||||||
|
4. ページの下部にある **バケットの作成** を選択します。
|
||||||
|
|
||||||
|
![create_s3_bucket_3](@site/docs/ja/_snippets/images/s3/s3-b.png)
|
||||||
|
|
||||||
|
5. リンクを選択し、ARNをコピーして、バケットのアクセスポリシーを設定するときに使用するために保存します。
|
||||||
|
|
||||||
|
6. バケットが作成されたら、S3バケットリストで新しいS3バケットを見つけ、リンクを選択します。
|
||||||
|
|
||||||
|
![create_s3_bucket_4](@site/docs/ja/_snippets/images/s3/s3-c.png)
|
||||||
|
|
||||||
|
7. **フォルダを作成** を選択します。
|
||||||
|
|
||||||
|
![create_s3_bucket_5](@site/docs/ja/_snippets/images/s3/s3-d.png)
|
||||||
|
|
||||||
|
8. ClickHouse S3ディスクのターゲットとなるフォルダ名を入力し、**フォルダを作成** を選択します。
|
||||||
|
|
||||||
|
![create_s3_bucket_6](@site/docs/ja/_snippets/images/s3/s3-e.png)
|
||||||
|
|
||||||
|
9. フォルダがバケットリストに表示されるはずです。
|
||||||
|
|
||||||
|
![create_s3_bucket_7](@site/docs/ja/_snippets/images/s3/s3-f.png)
|
||||||
|
|
||||||
|
10. 新しいフォルダのチェックボックスを選択し、**URLをコピー** をクリックします。コピーしたURLは、次のセクションでのClickHouseストレージ設定で使用します。
|
||||||
|
|
||||||
|
![create_s3_bucket_8](@site/docs/ja/_snippets/images/s3/s3-g.png)
|
||||||
|
|
||||||
|
11. **権限** タブを選択し、**バケットポリシー** セクションの **編集** ボタンをクリックします。
|
||||||
|
|
||||||
|
![create_s3_bucket_9](@site/docs/ja/_snippets/images/s3/s3-h.png)
|
||||||
|
|
||||||
|
12. 以下の例のようにバケットポリシーを追加します:
|
||||||
|
```json
|
||||||
|
{
|
||||||
|
"Version": "2012-10-17",
|
||||||
|
"Id": "Policy123456",
|
||||||
|
"Statement": [
|
||||||
|
{
|
||||||
|
"Sid": "abc123",
|
||||||
|
"Effect": "Allow",
|
||||||
|
"Principal": {
|
||||||
|
"AWS": "arn:aws:iam::921234567898:user/mars-s3-user"
|
||||||
|
},
|
||||||
|
"Action": "s3:*",
|
||||||
|
"Resource": [
|
||||||
|
"arn:aws:s3:::mars-doc-test",
|
||||||
|
"arn:aws:s3:::mars-doc-test/*"
|
||||||
|
]
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
|パラメータ | 説明 | 例 |
|
||||||
|
|----------|-------------|----------------|
|
||||||
|
|Version | ポリシーインタープリタのバージョン、そのままにしておく | 2012-10-17 |
|
||||||
|
|Sid | ユーザー定義のポリシーID | abc123 |
|
||||||
|
|Effect | ユーザー要求が許可されるか拒否されるか | Allow |
|
||||||
|
|Principal | 許可されるアカウントまたはユーザー | arn:aws:iam::921234567898:user/mars-s3-user |
|
||||||
|
|Action | バケット上で許可される操作| s3:*|
|
||||||
|
|Resource | バケット内で操作が許可されるリソース | "arn:aws:s3:::mars-doc-test", "arn:aws:s3:::mars-doc-test/*" |
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
使用する権限を決定するためにセキュリティチームと協力し、これらを出発点として考えてください。
|
||||||
|
ポリシーと設定の詳細については、AWSドキュメントをご参照ください:
|
||||||
|
https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-policy-language-overview.html
|
||||||
|
:::
|
||||||
|
|
||||||
|
13. ポリシー設定を保存します。
|
||||||
|
|
||||||
|
</details>
|
11
docs/ja/_snippets/_add_remote_ip_access_list_detail.md
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
<details><summary>IPアクセスリストを管理する</summary>
|
||||||
|
|
||||||
|
ClickHouse Cloudのサービスリストから作業するサービスを選択し、**セキュリティ**に切り替えます。IPアクセスリストに、ClickHouse Cloudサービスに接続する必要があるリモートシステムのIPアドレスや範囲が含まれていない場合は、**エントリを追加**して問題を解決できます。
|
||||||
|
|
||||||
|
![サービスがトラフィックを許可しているか確認](@site/docs/ja/_snippets/images/ip-allow-list-check-list.png)
|
||||||
|
|
||||||
|
ClickHouse Cloudサービスに接続する必要がある個別のIPアドレス、またはアドレスの範囲を追加します。フォームを適宜修正し、**エントリを追加**し、**エントリを送信**します。
|
||||||
|
|
||||||
|
![現在のIPアドレスを追加](@site/docs/ja/_snippets/images/ip-allow-list-add-current-ip.png)
|
||||||
|
|
||||||
|
</details>
|
45
docs/ja/_snippets/_add_superset_detail.md
Normal file
@ -0,0 +1,45 @@
|
|||||||
|
<details><summary>DockerでApache Supersetを起動</summary>
|
||||||
|
|
||||||
|
Supersetは、[Docker Composeを使用してローカルにSupersetをインストールする](https://superset.apache.org/docs/installation/installing-superset-using-docker-compose/)手順を提供しています。GitHubからApache Supersetリポジトリをチェックアウトした後、最新の開発コードや特定のタグを実行することができます。`pre-release`としてマークされていない最新のリリースである2.0.0をお勧めします。
|
||||||
|
|
||||||
|
`docker compose`を実行する前にいくつかのタスクを行う必要があります:
|
||||||
|
|
||||||
|
1. 公式のClickHouse Connectドライバーを追加
|
||||||
|
2. MapBox APIキーを取得し、それを環境変数として追加(任意)
|
||||||
|
3. 実行するSupersetのバージョンを指定
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
以下のコマンドはGitHubリポジトリのトップレベル、`superset`から実行してください。
|
||||||
|
:::
|
||||||
|
|
||||||
|
## 公式ClickHouse Connectドライバー
|
||||||
|
|
||||||
|
SupersetデプロイメントでClickHouse Connectドライバーを利用可能にするために、ローカルのrequirementsファイルに追加します:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
echo "clickhouse-connect" >> ./docker/requirements-local.txt
|
||||||
|
```
|
||||||
|
|
||||||
|
## MapBox
|
||||||
|
|
||||||
|
これは任意です。MapBox APIキーなしでSupersetで位置データをプロットできますが、キーを追加するべきというメッセージが表示され、地図の背景画像が欠けます(データポイントのみが表示され、地図の背景は表示されません)。MapBoxは無料のティアを提供していますので、利用したい場合はぜひご利用ください。
|
||||||
|
|
||||||
|
ガイドが作成するサンプルの可視化の一部は、例えば経度や緯度データなどの位置情報を使用します。SupersetはMapBoxマップのサポートを含んでいます。MapBoxの可視化を使用するには、MapBox APIキーが必要です。[MapBoxの無料ティア](https://account.mapbox.com/auth/signup/)にサインアップし、APIキーを生成してください。
|
||||||
|
|
||||||
|
APIキーをSupersetで利用可能にします:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
echo "MAPBOX_API_KEY=pk.SAMPLE-Use-your-key-instead" >> docker/.env-non-dev
|
||||||
|
```
|
||||||
|
|
||||||
|
## Supersetバージョン2.0.0をデプロイ
|
||||||
|
|
||||||
|
リリース2.0.0をデプロイするには、以下を実行します:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
git checkout 2.0.0
|
||||||
|
TAG=2.0.0 docker-compose -f docker-compose-non-dev.yml pull
|
||||||
|
TAG=2.0.0 docker-compose -f docker-compose-non-dev.yml up
|
||||||
|
```
|
||||||
|
|
||||||
|
</details>
|
11
docs/ja/_snippets/_aws_regions.md
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
| 地域 | VPC サービス名 | アベイラビリティーゾーン ID |
|
||||||
|
|------------------|--------------------------------------------------------------------|------------------------------|
|
||||||
|
|ap-south-1 | com.amazonaws.vpce.ap-south-1.vpce-svc-0a786406c7ddc3a1b | aps1-az1 aps1-az2 aps1-az3 |
|
||||||
|
|ap-southeast-1 | com.amazonaws.vpce.ap-southeast-1.vpce-svc-0a8b096ec9d2acb01 | apse1-az1 apse1-az2 apse1-az3|
|
||||||
|
|ap-southeast-2 | com.amazonaws.vpce.ap-southeast-2.vpce-svc-0ca446409b23f0c01 | apse2-az1 apse2-az2 apse2-az3|
|
||||||
|
|eu-central-1 | com.amazonaws.vpce.eu-central-1.vpce-svc-0536fc4b80a82b8ed | euc1-az2 euc1-az3 euc1-az1 |
|
||||||
|
|eu-west-1 | com.amazonaws.vpce.eu-west-1.vpce-svc-066b03c9b5f61c6fc | euw1-az2 euw1-az3 euw1-az1 |
|
||||||
|
|us-east-1 c0 | com.amazonaws.vpce.us-east-1.vpce-svc-0a0218fa75c646d81 | use1-az6 use1-az1 use1-az2 |
|
||||||
|
|us-east-1 c1 | com.amazonaws.vpce.us-east-1.vpce-svc-096c118db1ff20ea4 | use1-az6 use1-az4 use1-az2 |
|
||||||
|
|us-east-2 | com.amazonaws.vpce.us-east-2.vpce-svc-0b99748bf269a86b4 | use2-az1 use2-az2 use2-az3 |
|
||||||
|
|us-west-2 | com.amazonaws.vpce.us-west-2.vpce-svc-049bbd33f61271781 | usw2-az2 usw2-az1 usw2-az3 |
|
15
docs/ja/_snippets/_check_ip_access_list_detail.md
Normal file
@ -0,0 +1,15 @@
|
|||||||
|
<details><summary>IPアクセスリストを管理する</summary>
|
||||||
|
|
||||||
|
ClickHouse Cloudのサービスリストから作業するサービスを選び、**設定**に切り替えます。
|
||||||
|
|
||||||
|
![サービスの設定](@site/docs/ja/_snippets/images/cloud-service-settings.png)
|
||||||
|
|
||||||
|
IPアクセスリストが**現在、このサービスにアクセスできるトラフィックはありません**と表示される場合は、**エントリを追加**して問題を解決できます。
|
||||||
|
|
||||||
|
![サービスがトラフィックを許可しているか確認する](@site/docs/ja/_snippets/images/ip-allow-list-check-list.png)
|
||||||
|
|
||||||
|
クイックスタートのために、ローカルのセキュリティポリシーが許可する場合は、現在のIPアドレスのみを追加することができます。これを行うには、**現在のIPを追加**を使用し、現在のIPと説明「ホームIP」でフォームを自動入力します。必要に応じてフォームを修正し、**エントリを追加**し**エントリを送信**します。
|
||||||
|
|
||||||
|
![現在のIPアドレスを追加する](@site/docs/ja/_snippets/images/ip-allow-list-add-current-ip.png)
|
||||||
|
|
||||||
|
</details>
|
61
docs/ja/_snippets/_clickhouse_mysql_cloud_setup.mdx
Normal file
@ -0,0 +1,61 @@
|
|||||||
|
1. ClickHouse Cloud Serviceを作成した後、認証情報画面でMySQLタブを選択します。
|
||||||
|
![Credentials screen - Prompt](./images/mysql1.png)
|
||||||
|
2. この特定のサービスに対してMySQLインターフェースを有効にするためにスイッチを切り替えます。これにより、そのサービスでポート`3306`が公開され、ユニークなMySQLユーザー名を含むMySQL接続画面が表示されます。パスワードはサービスのデフォルトユーザーのパスワードと同じになります。
|
||||||
|
![Credentials screen - Enabled MySQL](./images/mysql2.png)
|
||||||
|
代わりに、既存のサービスに対してMySQLインターフェースを有効にするには:
|
||||||
|
3. サービスが`Running`状態であることを確認し、MySQLインターフェースを有効にするサービスの「接続文字列を表示」ボタンをクリックします。
|
||||||
|
![Connection screen - Prompt MySQL](./images/mysql3.png)
|
||||||
|
4. この特定のサービスに対してMySQLインターフェースを有効にするためにスイッチを切り替えます。これにより、デフォルトのパスワードを入力するよう求められます。
|
||||||
|
![Connection screen - Prompt MySQL](./images/mysql4.png)
|
||||||
|
5. パスワードを入力すると、このサービスのMySQL接続文字列が表示されます。
|
||||||
|
![Connection screen - MySQL Enabled](./images/mysql5.png)
|
||||||
|
|
||||||
|
## ClickHouse Cloudで複数のMySQLユーザーを作成する
|
||||||
|
|
||||||
|
デフォルトでは、`mysql4<subdomain>`という組み込みユーザーがあり、これは`default`ユーザーと同じパスワードを使用します。`<subdomain>`部分はあなたのClickHouse Cloudホスト名の最初のセグメントです。このフォーマットは、安全な接続を実装しているが[TLSハンドシェイクでSNI情報を提供しない](https://www.cloudflare.com/learning/ssl/what-is-sni)ツール(MySQLコンソールクライアントがその一例)で作業するために必要です。この場合、ユーザー名に追加のヒントを含めずには内部ルーティングを行うことができません。
|
||||||
|
|
||||||
|
これにより、MySQLインターフェースで使用する新しいユーザーを作成する際には、`mysql4<subdomain>_<username>`のフォーマットを使用することを_強くお勧めします_。ここで、`<subdomain>`はあなたのCloudサービスを識別するためのヒントであり、`<username>`は選択した任意のサフィックスです。
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
ClickHouse Cloudホスト名が`foobar.us-east1.aws.clickhouse.cloud`の場合、`<subdomain>`部分は`foobar`に相当し、カスタムMySQLユーザー名は`mysql4foobar_team1`のようになります。
|
||||||
|
:::
|
||||||
|
|
||||||
|
MySQLインターフェースを使用するために追加のユーザーを作成することができます。例えば、追加の設定を適用する必要がある場合などです。
|
||||||
|
|
||||||
|
1. オプション - カスタムユーザーに適用する[設定プロフィール](https://clickhouse.com/docs/ja/sql-reference/statements/create/settings-profile)を作成します。たとえば、後で作成するユーザーで接続するときにデフォルトで適用される追加設定を持つ`my_custom_profile`:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE SETTINGS PROFILE my_custom_profile SETTINGS prefer_column_name_to_alias=1;
|
||||||
|
```
|
||||||
|
|
||||||
|
`prefer_column_name_to_alias`は単なる例として使用されます。ここに他の設定を使用することもできます。
|
||||||
|
2. 以下のフォーマットを使用して[ユーザーを作成](https://clickhouse.com/docs/ja/sql-reference/statements/create/user)します: `mysql4<subdomain>_<username>` ([上記参照](#creating-multiple-mysql-users-in-clickhouse-cloud))。パスワードはダブルSHA1形式である必要があります。例:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$';
|
||||||
|
```
|
||||||
|
|
||||||
|
または、このユーザーにカスタムプロフィールを使用したい場合:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$' SETTINGS PROFILE 'my_custom_profile';
|
||||||
|
```
|
||||||
|
|
||||||
|
ここで、`my_custom_profile`は前に作成したプロフィールの名前です。
|
||||||
|
3. 新しいユーザーに必要なアクセス権を付与して、目的のテーブルまたはデータベースと対話できるようにします。[権限を付与](https://clickhouse.com/docs/ja/sql-reference/statements/grant)する例として、たとえば`system.query_log`のみのアクセスを付与したい場合:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT SELECT ON system.query_log TO mysql4foobar_team1;
|
||||||
|
```
|
||||||
|
|
||||||
|
4. 作成したユーザーを使用して、MySQLインターフェースでClickHouse Cloudサービスに接続します。
|
||||||
|
|
||||||
|
### ClickHouse Cloudでの複数のMySQLユーザーのトラブルシューティング
|
||||||
|
|
||||||
|
新しいMySQLユーザーを作成し、MySQL CLIクライアントで接続しているときに以下のエラーが表示された場合:
|
||||||
|
|
||||||
|
```
|
||||||
|
ERROR 2013 (HY000): Lost connection to MySQL server at 'reading authorization packet', system error: 54
|
||||||
|
```
|
||||||
|
|
||||||
|
この場合、ユーザー名が`mysql4<subdomain>_<username>`形式に従っていることを確認してください。[上記](#creating-multiple-mysql-users-in-clickhouse-cloud)で説明されています。
|
87
docs/ja/_snippets/_clickhouse_mysql_on_premise_setup.mdx
Normal file
@ -0,0 +1,87 @@
|
|||||||
|
ClickHouseサーバーにMySQLインターフェースを有効にする方法については[公式ドキュメント](https://clickhouse.com/docs/ja/interfaces/mysql)を参照してください。
|
||||||
|
|
||||||
|
サーバーの `config.xml` にエントリを追加することに加えて、
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<clickhouse>
|
||||||
|
<mysql_port>9004</mysql_port>
|
||||||
|
</clickhouse>
|
||||||
|
```
|
||||||
|
|
||||||
|
MySQLインターフェースを利用するユーザーには、[二重SHA1パスワード暗号化](https://clickhouse.com/docs/ja/operations/settings/settings-users#user-namepassword)を使用することが**必要**です。
|
||||||
|
|
||||||
|
シェルから二重SHA1で暗号化されたランダムパスワードを生成するには以下を実行してください:
|
||||||
|
|
||||||
|
```shell
|
||||||
|
PASSWORD=$(base64 < /dev/urandom | head -c16); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
|
||||||
|
```
|
||||||
|
|
||||||
|
出力は以下のようになります:
|
||||||
|
|
||||||
|
```
|
||||||
|
LZOQYnqQN4L/T6L0
|
||||||
|
fbc958cc745a82188a51f30de69eebfc67c40ee4
|
||||||
|
```
|
||||||
|
|
||||||
|
最初の行は生成されたパスワードで、2行目はClickHouseの設定に使用するハッシュです。
|
||||||
|
|
||||||
|
以下は生成されたハッシュを使用する`mysql_user`の設定例です:
|
||||||
|
|
||||||
|
`/etc/clickhouse-server/users.d/mysql_user.xml`
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<users>
|
||||||
|
<mysql_user>
|
||||||
|
<password_double_sha1_hex>fbc958cc745a82188a51f30de69eebfc67c40ee4</password_double_sha1_hex>
|
||||||
|
<networks>
|
||||||
|
<ip>::/0</ip>
|
||||||
|
</networks>
|
||||||
|
<profile>default</profile>
|
||||||
|
<quota>default</quota>
|
||||||
|
</mysql_user>
|
||||||
|
</users>
|
||||||
|
```
|
||||||
|
|
||||||
|
`password_double_sha1_hex` エントリを自分で生成した二重SHA1ハッシュに置き換えてください。
|
||||||
|
|
||||||
|
さらに、BIツールがMySQLコネクタを使用する際にデータベーススキーマを適切に調査できるように、`SHOW [FULL] COLUMNS` クエリの結果でMySQLネイティブタイプを表示するために、`use_mysql_types_in_show_columns`を使用することを推奨します。
|
||||||
|
|
||||||
|
例えば:
|
||||||
|
|
||||||
|
`/etc/clickhouse-server/users.d/mysql_user.xml`
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<profiles>
|
||||||
|
<default>
|
||||||
|
<use_mysql_types_in_show_columns>1</use_mysql_types_in_show_columns>
|
||||||
|
</default>
|
||||||
|
</profiles>
|
||||||
|
```
|
||||||
|
|
||||||
|
または、デフォルト以外の異なるプロファイルに割り当てることもできます。
|
||||||
|
|
||||||
|
`mysql` バイナリが利用可能であれば、コマンドラインから接続をテストできます。以下は、サンプルのユーザー名 (`mysql_user`) とパスワード (`LZOQYnqQN4L/T6L0`) を使用したコマンドです:
|
||||||
|
|
||||||
|
```bash
|
||||||
|
mysql --protocol tcp -h localhost -u mysql_user -P 9004 --password=LZOQYnqQN4L/T6L0
|
||||||
|
```
|
||||||
|
|
||||||
|
```
|
||||||
|
mysql> show databases;
|
||||||
|
+--------------------+
|
||||||
|
| name |
|
||||||
|
+--------------------+
|
||||||
|
| INFORMATION_SCHEMA |
|
||||||
|
| default |
|
||||||
|
| information_schema |
|
||||||
|
| system |
|
||||||
|
+--------------------+
|
||||||
|
4行取得しました (0.00 sec)
|
||||||
|
4行読み込み、603.00 B、0.00156秒で、2564行/秒、377.48 KiB/秒
|
||||||
|
```
|
||||||
|
|
||||||
|
最後に、ClickHouseサーバーを希望するIPアドレスでリッスンするように設定します。例えば、`config.xml` の中で、すべてのアドレスでリッスンするために以下をアンコメントしてください:
|
||||||
|
|
||||||
|
```xml
|
||||||
|
<listen_host>::</listen_host>
|
||||||
|
```
|
19
docs/ja/_snippets/_cloud_backup.md
Normal file
@ -0,0 +1,19 @@
|
|||||||
|
## クラウドのバックアップとリストア
|
||||||
|
|
||||||
|
各サービスは毎日バックアップされています。サービスの**バックアップ**タブで、サービスのバックアップリストを見ることができます。そこからバックアップをリストアしたり、バックアップを削除することができます。
|
||||||
|
|
||||||
|
![バックアップのリスト](@site/docs/ja/_snippets/images/cloud-backup-list.png)
|
||||||
|
|
||||||
|
**バックアップをリストア**アイコンをクリックすると、新しく作成されるサービスの**サービス名**を指定して、**このバックアップをリストア**できます。
|
||||||
|
|
||||||
|
![バックアップのリスト](@site/docs/ja/_snippets/images/cloud-backup-restore.png)
|
||||||
|
|
||||||
|
新しいサービスは、準備が整うまでサービスリストに**プロビジョニング**として表示されます。
|
||||||
|
|
||||||
|
![バックアップのリスト](@site/docs/ja/_snippets/images/cloud-backup-new-service.png)
|
||||||
|
|
||||||
|
新しいサービスのプロビジョニングが完了すると、接続できます。その後…
|
||||||
|
|
||||||
|
:::note
|
||||||
|
ClickHouse Cloud サービスを利用する際に、SQL クライアントで `BACKUP` および `RESTORE` コマンドを使用しないでください。クラウドのバックアップは UI から管理する必要があります。
|
||||||
|
:::
|
7
docs/ja/_snippets/_config-files.md
Normal file
@ -0,0 +1,7 @@
|
|||||||
|
:::important best practices
|
||||||
|
ClickHouse Server を設定する際、設定ファイルを追加または編集するときは次のようにしてください:
|
||||||
|
- ファイルを `/etc/clickhouse-server/config.d/` ディレクトリに追加する
|
||||||
|
- ファイルを `/etc/clickhouse-server/users.d/` ディレクトリに追加する
|
||||||
|
- `/etc/clickhouse-server/config.xml` ファイルはそのままにしておく
|
||||||
|
- `/etc/clickhouse-server/users.xml` ファイルはそのままにしておく
|
||||||
|
:::
|
17
docs/ja/_snippets/_gather_your_details_http.mdx
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
HTTP(S) を使用して ClickHouse に接続するには、以下の情報が必要です:
|
||||||
|
|
||||||
|
- **HOST と PORT**: 通常、TLS を使用する場合のポートは 8443、TLS を使用しない場合は 8123 です。
|
||||||
|
|
||||||
|
- **データベース名**: デフォルトで `default` という名前のデータベースがありますが、接続したいデータベースの名前を使用してください。
|
||||||
|
|
||||||
|
- **ユーザー名とパスワード**: デフォルトでユーザー名は `default` です。使用ケースに適したユーザー名を使用してください。
|
||||||
|
|
||||||
|
ClickHouse Cloud サービスの詳細は、ClickHouse Cloud コンソールで確認できます。 接続するサービスを選択し、**接続** をクリックします:
|
||||||
|
|
||||||
|
![ClickHouse Cloud service connect button](@site/docs/ja/_snippets/images/cloud-connect-button.png)
|
||||||
|
|
||||||
|
**HTTPS** を選択すると、サンプルの `curl` コマンドで詳細が確認できます。
|
||||||
|
|
||||||
|
![ClickHouse Cloud HTTPS connection details](@site/docs/ja/_snippets/images/connection-details-https.png)
|
||||||
|
|
||||||
|
セルフマネージドの ClickHouse を使用している場合、接続の詳細は ClickHouse 管理者によって設定されます。
|
17
docs/ja/_snippets/_gather_your_details_native.md
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
ClickHouseにネイティブTCPで接続するには、次の情報が必要です。
|
||||||
|
|
||||||
|
- **HOSTとPORT**: 通常、TLSを使用している場合はポートは9440、TLSを使用していない場合は9000です。
|
||||||
|
|
||||||
|
- **データベース名**: デフォルトでは、`default`という名前のデータベースがあります。接続したいデータベースの名前を使用してください。
|
||||||
|
|
||||||
|
- **ユーザー名とパスワード**: デフォルトのユーザー名は`default`です。使用するケースに適したユーザー名を利用してください。
|
||||||
|
|
||||||
|
ClickHouse Cloudサービスの詳細は、ClickHouse Cloudコンソールで確認できます。接続するサービスを選択し、**Connect**をクリックします。
|
||||||
|
|
||||||
|
![ClickHouse Cloud service connect button](@site/docs/ja/_snippets/images/cloud-connect-button.png)
|
||||||
|
|
||||||
|
**Native** を選択すると、例として `clickhouse-client` コマンドで使用可能な詳細が表示されます。
|
||||||
|
|
||||||
|
![ClickHouse Cloud Native TCP connection details](@site/docs/ja/_snippets/images/connection-details-native.png)
|
||||||
|
|
||||||
|
セルフマネージドのClickHouseを使用している場合、接続の詳細はClickHouse管理者によって設定されます。
|
6
docs/ja/_snippets/_gcp_regions.md
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
| リージョン | サービスアタッチメント | プライベートDNSドメイン |
|
||||||
|
|--------------|-------------------------------------------------------------|------------------------------|
|
||||||
|
|asia-southeast1| projects/dataplane-production/regions/asia-southeast1/serviceAttachments/production-asia-southeast1-clickhouse-cloud| asia-southeast1.p.gcp.clickhouse.cloud|
|
||||||
|
|europe-west4| projects/dataplane-production/regions/europe-west4/serviceAttachments/production-europe-west4-clickhouse-cloud| europe-west4.p.gcp.clickhouse.cloud|
|
||||||
|
|us-central1| projects/dataplane-production/regions/us-central1/serviceAttachments/production-us-central1-clickhouse-cloud| us-central1.p.gcp.clickhouse.cloud|
|
||||||
|
|us-east1| projects/dataplane-production/regions/us-east1/serviceAttachments/production-us-east1-clickhouse-cloud| us-east1.p.gcp.clickhouse.cloud|
|
5
docs/ja/_snippets/_keeper-config-files.md
Normal file
@ -0,0 +1,5 @@
|
|||||||
|
:::important ベストプラクティス
|
||||||
|
ClickHouse Keeperを構成するために設定ファイルを編集する際には、以下を行うべきです:
|
||||||
|
- `/etc/clickhouse-keeper/keeper_config.xml` をバックアップする
|
||||||
|
- `/etc/clickhouse-keeper/keeper_config.xml` ファイルを編集する
|
||||||
|
:::
|
11
docs/ja/_snippets/_launch_sql_console.md
Normal file
@ -0,0 +1,11 @@
|
|||||||
|
:::tip SQL コンソール
|
||||||
|
SQL クライアント接続が必要な場合、ClickHouse Cloud サービスには関連付けられたウェブベースの SQL コンソールがあります。詳細については、以下の **SQL コンソールに接続** を展開してください。
|
||||||
|
:::
|
||||||
|
|
||||||
|
<details><summary>SQL コンソールに接続</summary>
|
||||||
|
|
||||||
|
ClickHouse Cloud サービス一覧から、作業するサービスを選択し、**接続** をクリックします。ここから **SQL コンソールを開く** ことができます:
|
||||||
|
|
||||||
|
![SQL コンソールに接続](@site/docs/ja/_snippets/images/cloud-connect-to-sql-console.png)
|
||||||
|
|
||||||
|
</details>
|
9
docs/ja/_snippets/_replication-sharding-terminology.md
Normal file
@ -0,0 +1,9 @@
|
|||||||
|
## 用語集
|
||||||
|
### レプリカ
|
||||||
|
データのコピー。ClickHouseは常にデータの少なくとも1つのコピーを持っているため、**レプリカ**の最小数は1です。これは重要なポイントで、元のデータをレプリカとして数えることに慣れていないかもしれませんが、ClickHouseのコードとドキュメントではその用語が使用されています。データの2番目のレプリカを追加することで、フォールトトレランスを提供できます。
|
||||||
|
|
||||||
|
### シャード
|
||||||
|
データのサブセット。ClickHouseは常にデータの少なくとも1つのシャードを持っているので、データを複数のサーバーに分散しない場合、データは1つのシャードに格納されます。データを複数のサーバーに分散してシャーディングすることは、単一サーバーの容量を超えた場合に負荷を分散するために利用できます。宛先サーバーは**シャーディングキー**によって決まり、分散テーブルを作成する際に定義されます。シャーディングキーはランダムなものか、[ハッシュ関数](https://clickhouse.com/docs/ja/sql-reference/functions/hash-functions)の出力として定義することができます。シャーディングを含むデプロイメント例では、シャーディングキーとして`rand()`を使用し、いつどのようにして異なるシャーディングキーを選択するかについてのさらなる情報を提供します。
|
||||||
|
|
||||||
|
### 分散調整
|
||||||
|
ClickHouse Keeperは、データのレプリケーションと分散DDLクエリの実行のための調整システムを提供します。ClickHouse KeeperはApache ZooKeeperと互換性があります。
|
3
docs/ja/_snippets/_self_managed_only_automated.md
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
:::note
|
||||||
|
このページは [ClickHouse Cloud](https://clickhouse.com/cloud) には適用されません。ここで記載されている手順は、ClickHouse Cloud サービスで自動化されています。
|
||||||
|
:::
|
4
docs/ja/_snippets/_self_managed_only_no_roadmap.md
Normal file
@ -0,0 +1,4 @@
|
|||||||
|
:::note
|
||||||
|
このページは[ClickHouse Cloud](https://clickhouse.com/cloud)には適用されません。ここで文書化されている機能は、ClickHouse Cloudサービスでは利用できません。
|
||||||
|
詳細は、ClickHouseの[Cloud Compatibility](/docs/ja/whats-new/cloud-compatibility)ガイドをご覧ください。
|
||||||
|
:::
|
3
docs/ja/_snippets/_self_managed_only_not_applicable.md
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
:::note
|
||||||
|
このページは[ClickHouse Cloud](https://clickhouse.com/cloud)には適用されません。ここに記載されている手順は、セルフマネージドのClickHouseデプロイメントでのみ必要です。
|
||||||
|
:::
|
3
docs/ja/_snippets/_self_managed_only_roadmap.md
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
:::note
|
||||||
|
このページは[ClickHouse Cloud](https://clickhouse.com/cloud)には適用されません。ここで文書化されている機能は、ClickHouse Cloudサービスではまだ利用できません。詳しくは、ClickHouseの[Cloud互換性](/docs/ja/whats-new/cloud-compatibility#roadmap)ガイドを参照してください。
|
||||||
|
:::
|
3
docs/ja/_snippets/_service_actions_menu.md
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
<p>ClickHouse Cloudサービスの<b>アクションメニュー</b>を開き、<b>{props.menu}</b>を選択します:</p>
|
||||||
|
|
||||||
|
![Cloud service Actions menu](@site/docs/ja/_snippets/images/cloud-service-actions-menu.png)
|
3
docs/ja/_snippets/_sign_in_or_trial.md
Normal file
@ -0,0 +1,3 @@
|
|||||||
|
[ClickHouse.cloud](https://clickhouse.cloud)でアカウントを作成するか、サインインしてください。
|
||||||
|
|
||||||
|
![Cloud sign in prompt](@site/docs/ja/_snippets/images/cloud-sign-in-or-trial.png)
|
22
docs/ja/_snippets/_tabs.md
Normal file
@ -0,0 +1,22 @@
|
|||||||
|
---
|
||||||
|
sidebar_label: タブサンプル
|
||||||
|
---
|
||||||
|
|
||||||
|
import Tabs from '@theme/Tabs';
|
||||||
|
import TabItem from '@theme/TabItem';
|
||||||
|
import CodeBlock from '@theme/CodeBlock';
|
||||||
|
|
||||||
|
## ステップ 1
|
||||||
|
|
||||||
|
<Tabs groupId="deployMethod">
|
||||||
|
<TabItem value="serverless" label="ClickHouse クラウド" default>
|
||||||
|
|
||||||
|
クラウド
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
<TabItem value="selfmanaged" label="セルフマネージド">
|
||||||
|
|
||||||
|
セルフマネージド
|
||||||
|
|
||||||
|
</TabItem>
|
||||||
|
</Tabs>
|
447
docs/ja/_snippets/_users-and-roles-common.md
Normal file
@ -0,0 +1,447 @@
|
|||||||
|
## 管理者権限のテスト
|
||||||
|
|
||||||
|
ユーザー `default` からログアウトし、ユーザー `clickhouse_admin` としてログインし直してください。
|
||||||
|
|
||||||
|
これらすべてが成功するはずです:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW GRANTS FOR clickhouse_admin;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE DATABASE db1
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE db1.table1 (id UInt64, column1 String) ENGINE = MergeTree() ORDER BY id;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
INSERT INTO db1.table1 (id, column1) VALUES (1, 'abc');
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT * FROM db1.table1;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP TABLE db1.table1;
|
||||||
|
```
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP DATABASE db1;
|
||||||
|
```
|
||||||
|
|
||||||
|
## 非管理者ユーザー
|
||||||
|
|
||||||
|
ユーザーは必要な権限を持ち、全員が管理者であるべきではありません。このドキュメントの残りの部分では、例のシナリオと必要な役割を提供します。
|
||||||
|
|
||||||
|
### 準備
|
||||||
|
|
||||||
|
例で使用されるテーブルとユーザーを作成します。
|
||||||
|
|
||||||
|
#### サンプルデータベース、テーブル、および行の作成
|
||||||
|
|
||||||
|
1. テストデータベースを作成
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE DATABASE db1;
|
||||||
|
```
|
||||||
|
|
||||||
|
2. テーブルを作成
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE TABLE db1.table1 (
|
||||||
|
id UInt64,
|
||||||
|
column1 String,
|
||||||
|
column2 String
|
||||||
|
)
|
||||||
|
ENGINE MergeTree
|
||||||
|
ORDER BY id;
|
||||||
|
```
|
||||||
|
|
||||||
|
3. サンプル行でテーブルを埋める
|
||||||
|
|
||||||
|
```sql
|
||||||
|
INSERT INTO db1.table1
|
||||||
|
(id, column1, column2)
|
||||||
|
VALUES
|
||||||
|
(1, 'A', 'abc'),
|
||||||
|
(2, 'A', 'def'),
|
||||||
|
(3, 'B', 'abc'),
|
||||||
|
(4, 'B', 'def');
|
||||||
|
```
|
||||||
|
|
||||||
|
4. テーブルを確認する:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT *
|
||||||
|
FROM db1.table1
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: 475015cc-6f51-4b20-bda2-3c9c41404e49
|
||||||
|
|
||||||
|
┌─id─┬─column1─┬─column2─┐
|
||||||
|
│ 1 │ A │ abc │
|
||||||
|
│ 2 │ A │ def │
|
||||||
|
│ 3 │ B │ abc │
|
||||||
|
│ 4 │ B │ def │
|
||||||
|
└────┴─────────┴─────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
5. 特定のカラムへのアクセスを制限することを示すために使用される通常のユーザーを作成:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE USER column_user IDENTIFIED BY 'password';
|
||||||
|
```
|
||||||
|
|
||||||
|
6. 特定の値を持つ行へのアクセスを制限することを示すために使用される通常のユーザーを作成:
|
||||||
|
```sql
|
||||||
|
CREATE USER row_user IDENTIFIED BY 'password';
|
||||||
|
```
|
||||||
|
|
||||||
|
#### 役割の作成
|
||||||
|
|
||||||
|
この例を使って:
|
||||||
|
|
||||||
|
- カラムや行、異なる権限のための役割を作成します
|
||||||
|
- 役割に権限を付与します
|
||||||
|
- ユーザーを各役割に割り当てます
|
||||||
|
|
||||||
|
役割は、各ユーザーを個別に管理する代わりに、特定の権限を持つユーザーのグループを定義するために使用されます。
|
||||||
|
|
||||||
|
1. `db1` データベースおよび `table1` において、`column1` のみを閲覧できるユーザーの役割を作成:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE ROLE column1_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
2. `column1` のみが閲覧可能な権限を設定
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT SELECT(id, column1) ON db1.table1 TO column1_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
3. `column_user` ユーザーを `column1_users` 役割に追加
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT column1_users TO column_user;
|
||||||
|
```
|
||||||
|
|
||||||
|
4. `column1` に `A` を含む行のみを閲覧できるユーザーの役割を作成
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE ROLE A_rows_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
5. `row_user` を `A_rows_users` 役割に追加
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT A_rows_users TO row_user;
|
||||||
|
```
|
||||||
|
|
||||||
|
6. `column1` が `A` の値を持つ行のみを閲覧可能とするポリシーを作成
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE ROW POLICY A_row_filter ON db1.table1 FOR SELECT USING column1 = 'A' TO A_rows_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
7. データベースとテーブルへの権限を設定
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT SELECT(id, column1, column2) ON db1.table1 TO A_rows_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
8. 他の役割に対してもすべての行にアクセスできるように明示的な権限を付与
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE ROW POLICY allow_other_users_filter
|
||||||
|
ON db1.table1 FOR SELECT USING 1 TO clickhouse_admin, column1_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
テーブルにポリシーをアタッチすると、システムはそのポリシーを適用し、定義されたユーザーと役割のみがそのテーブルでの操作を行うことができます。その他のユーザーは操作を拒否されます。制限された行ポリシーが他のユーザーに適用されないようにするため、他のユーザーと役割が通常または他のタイプのアクセスを持つことを許可する別のポリシーを定義する必要があります。
|
||||||
|
:::
|
||||||
|
|
||||||
|
## 検証
|
||||||
|
|
||||||
|
### カラム制限ユーザーでの役割の権限テスト
|
||||||
|
|
||||||
|
1. `clickhouse_admin` ユーザーでClickHouseクライアントにログイン
|
||||||
|
|
||||||
|
```
|
||||||
|
clickhouse-client --user clickhouse_admin --password password
|
||||||
|
```
|
||||||
|
|
||||||
|
2. 管理者ユーザーを使用して、データベース、テーブル、およびすべての行のアクセスを確認。
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT *
|
||||||
|
FROM db1.table1
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: f5e906ea-10c6-45b0-b649-36334902d31d
|
||||||
|
|
||||||
|
┌─id─┬─column1─┬─column2─┐
|
||||||
|
│ 1 │ A │ abc │
|
||||||
|
│ 2 │ A │ def │
|
||||||
|
│ 3 │ B │ abc │
|
||||||
|
│ 4 │ B │ def │
|
||||||
|
└────┴─────────┴─────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
3. `column_user` ユーザーでClickHouseクライアントにログイン
|
||||||
|
|
||||||
|
```
|
||||||
|
clickhouse-client --user column_user --password password
|
||||||
|
```
|
||||||
|
|
||||||
|
4. すべてのカラムを使用した `SELECT`
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT *
|
||||||
|
FROM db1.table1
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: 5576f4eb-7450-435c-a2d6-d6b49b7c4a23
|
||||||
|
|
||||||
|
0 rows in set. Elapsed: 0.006 sec.
|
||||||
|
|
||||||
|
Received exception from server (version 22.3.2):
|
||||||
|
Code: 497. DB::Exception: Received from localhost:9000.
|
||||||
|
DB::Exception: column_user: Not enough privileges.
|
||||||
|
To execute this query it's necessary to have grant
|
||||||
|
SELECT(id, column1, column2) ON db1.table1. (ACCESS_DENIED)
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
すべてのカラムが指定されたためアクセスが拒否されました。ユーザーは `id` と `column1` のみへのアクセス権を持っています
|
||||||
|
:::
|
||||||
|
|
||||||
|
5. 指定されたカラムのみを用いた `SELECT` クエリを確認:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
id,
|
||||||
|
column1
|
||||||
|
FROM db1.table1
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: cef9a083-d5ce-42ff-9678-f08dc60d4bb9
|
||||||
|
|
||||||
|
┌─id─┬─column1─┐
|
||||||
|
│ 1 │ A │
|
||||||
|
│ 2 │ A │
|
||||||
|
│ 3 │ B │
|
||||||
|
│ 4 │ B │
|
||||||
|
└────┴─────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### 行制限ユーザーでの役割の権限テスト
|
||||||
|
|
||||||
|
1. `row_user` でClickHouseクライアントにログイン
|
||||||
|
|
||||||
|
```
|
||||||
|
clickhouse-client --user row_user --password password
|
||||||
|
```
|
||||||
|
|
||||||
|
2. 利用可能な行を表示
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT *
|
||||||
|
FROM db1.table1
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: a79a113c-1eca-4c3f-be6e-d034f9a220fb
|
||||||
|
|
||||||
|
┌─id─┬─column1─┬─column2─┐
|
||||||
|
│ 1 │ A │ abc │
|
||||||
|
│ 2 │ A │ def │
|
||||||
|
└────┴─────────┴─────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
:::note
|
||||||
|
上記の2行のみが返されることを確認し、`column1` に `B` の値を持つ行は除外されるべきです。
|
||||||
|
:::
|
||||||
|
|
||||||
|
## ユーザーと役割の変更
|
||||||
|
|
||||||
|
ユーザーは必要な権限の組み合わせに対して複数の役割を割り当てることができます。複数の役割を使用する場合、システムは役割を組み合わせて権限を決定し、その結果、役割の権限が累積されます。
|
||||||
|
|
||||||
|
例えば、1つの `role1` が `column1` のみの選択を許可し、`role2` が `column1` と `column2` の選択を許可する場合、ユーザーは両方のカラムにアクセスできます。
|
||||||
|
|
||||||
|
1. 管理者アカウントを使用して、デフォルトの役割で行とカラムの両方を制限する新しいユーザーを作成
|
||||||
|
|
||||||
|
```sql
|
||||||
|
CREATE USER row_and_column_user IDENTIFIED BY 'password' DEFAULT ROLE A_rows_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
2. `A_rows_users` 役割に対する以前の権限を削除
|
||||||
|
|
||||||
|
```sql
|
||||||
|
REVOKE SELECT(id, column1, column2) ON db1.table1 FROM A_rows_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
3. `A_rows_users` 役割に `column1` のみの選択を許可
|
||||||
|
|
||||||
|
```sql
|
||||||
|
GRANT SELECT(id, column1) ON db1.table1 TO A_rows_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
4. `row_and_column_user` でClickHouseクライアントにログイン
|
||||||
|
|
||||||
|
```
|
||||||
|
clickhouse-client --user row_and_column_user --password password;
|
||||||
|
```
|
||||||
|
|
||||||
|
5. すべてのカラムでテスト:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT *
|
||||||
|
FROM db1.table1
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: 8cdf0ff5-e711-4cbe-bd28-3c02e52e8bc4
|
||||||
|
|
||||||
|
0 rows in set. Elapsed: 0.005 sec.
|
||||||
|
|
||||||
|
Received exception from server (version 22.3.2):
|
||||||
|
Code: 497. DB::Exception: Received from localhost:9000.
|
||||||
|
DB::Exception: row_and_column_user: Not enough privileges.
|
||||||
|
To execute this query it's necessary to have grant
|
||||||
|
SELECT(id, column1, column2) ON db1.table1. (ACCESS_DENIED)
|
||||||
|
```
|
||||||
|
|
||||||
|
6. 制限されたカラムでテスト:
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SELECT
|
||||||
|
id,
|
||||||
|
column1
|
||||||
|
FROM db1.table1
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: 5e30b490-507a-49e9-9778-8159799a6ed0
|
||||||
|
|
||||||
|
┌─id─┬─column1─┐
|
||||||
|
│ 1 │ A │
|
||||||
|
│ 2 │ A │
|
||||||
|
└────┴─────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## トラブルシューティング
|
||||||
|
|
||||||
|
権限が交差または結合して予期しない結果を生む場合があります。次のコマンドを使用して管理者アカウントを使用して問題を絞り込むことができます。
|
||||||
|
|
||||||
|
### ユーザーの権限と役割のリスト
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW GRANTS FOR row_and_column_user
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: 6a73a3fe-2659-4aca-95c5-d012c138097b
|
||||||
|
|
||||||
|
┌─GRANTS FOR row_and_column_user───────────────────────────┐
|
||||||
|
│ GRANT A_rows_users, column1_users TO row_and_column_user │
|
||||||
|
└──────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### ClickHouse の役割のリスト
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW ROLES
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: 1e21440a-18d9-4e75-8f0e-66ec9b36470a
|
||||||
|
|
||||||
|
┌─name────────────┐
|
||||||
|
│ A_rows_users │
|
||||||
|
│ column1_users │
|
||||||
|
└─────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### ポリシーの表示
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW ROW POLICIES
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: f2c636e9-f955-4d79-8e80-af40ea227ebc
|
||||||
|
|
||||||
|
┌─name───────────────────────────────────┐
|
||||||
|
│ A_row_filter ON db1.table1 │
|
||||||
|
│ allow_other_users_filter ON db1.table1 │
|
||||||
|
└────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
### ポリシーがどのように定義されているかと現在の権限を表示
|
||||||
|
|
||||||
|
```sql
|
||||||
|
SHOW CREATE ROW POLICY A_row_filter ON db1.table1
|
||||||
|
```
|
||||||
|
|
||||||
|
```response
|
||||||
|
Query id: 0d3b5846-95c7-4e62-9cdd-91d82b14b80b
|
||||||
|
|
||||||
|
┌─CREATE ROW POLICY A_row_filter ON db1.table1────────────────────────────────────────────────┐
|
||||||
|
│ CREATE ROW POLICY A_row_filter ON db1.table1 FOR SELECT USING column1 = 'A' TO A_rows_users │
|
||||||
|
└─────────────────────────────────────────────────────────────────────────────────────────────┘
|
||||||
|
```
|
||||||
|
|
||||||
|
## ロール、ポリシー、およびユーザーを管理するためのコマンドの例
|
||||||
|
|
||||||
|
次のコマンドを使用して:
|
||||||
|
|
||||||
|
- 権限の削除
|
||||||
|
- ポリシーの削除
|
||||||
|
- ユーザーを役割から解除
|
||||||
|
- ユーザーと役割の削除
|
||||||
|
<br />
|
||||||
|
|
||||||
|
:::tip
|
||||||
|
これらのコマンドは管理者ユーザーまたは `default` ユーザーとして実行してください
|
||||||
|
:::
|
||||||
|
|
||||||
|
### 役割からの権限を削除
|
||||||
|
|
||||||
|
```sql
|
||||||
|
REVOKE SELECT(column1, id) ON db1.table1 FROM A_rows_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
### ポリシーを削除
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP ROW POLICY A_row_filter ON db1.table1;
|
||||||
|
```
|
||||||
|
|
||||||
|
### ユーザーを役割から解除
|
||||||
|
|
||||||
|
```sql
|
||||||
|
REVOKE A_rows_users FROM row_user;
|
||||||
|
```
|
||||||
|
|
||||||
|
### 役割を削除
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP ROLE A_rows_users;
|
||||||
|
```
|
||||||
|
|
||||||
|
### ユーザーを削除
|
||||||
|
|
||||||
|
```sql
|
||||||
|
DROP USER row_user;
|
||||||
|
```
|
||||||
|
|
||||||
|
## 要約
|
||||||
|
|
||||||
|
このドキュメントでは、SQLユーザーと役割の作成の基本を示し、ユーザーおよび役割の権限を設定および変更する手順を提供しました。それぞれの詳細情報については、ユーザーガイドおよびリファレンスドキュメントを参照してください。
|
BIN
docs/ja/_snippets/images/aws-rds-mysql.png
Normal file
After Width: | Height: | Size: 52 KiB |
BIN
docs/ja/_snippets/images/cloud-advanced-scaling.png
Normal file
After Width: | Height: | Size: 46 KiB |
BIN
docs/ja/_snippets/images/cloud-backup-list.png
Normal file
After Width: | Height: | Size: 37 KiB |
BIN
docs/ja/_snippets/images/cloud-backup-new-service.png
Normal file
After Width: | Height: | Size: 30 KiB |
BIN
docs/ja/_snippets/images/cloud-backup-restore.png
Normal file
After Width: | Height: | Size: 17 KiB |
BIN
docs/ja/_snippets/images/cloud-connect-button.png
Normal file
After Width: | Height: | Size: 27 KiB |
BIN
docs/ja/_snippets/images/cloud-connect-to-sql-console.png
Normal file
After Width: | Height: | Size: 20 KiB |
BIN
docs/ja/_snippets/images/cloud-load-data-sample.png
Normal file
After Width: | Height: | Size: 39 KiB |
BIN
docs/ja/_snippets/images/cloud-select-a-service.png
Normal file
After Width: | Height: | Size: 26 KiB |
BIN
docs/ja/_snippets/images/cloud-service-actions-menu.png
Normal file
After Width: | Height: | Size: 28 KiB |
BIN
docs/ja/_snippets/images/cloud-service-settings.png
Normal file
After Width: | Height: | Size: 10 KiB |
BIN
docs/ja/_snippets/images/cloud-sign-in-or-trial.png
Normal file
After Width: | Height: | Size: 27 KiB |
BIN
docs/ja/_snippets/images/cmek-performance.png
Normal file
After Width: | Height: | Size: 69 KiB |
BIN
docs/ja/_snippets/images/cmek1.png
Normal file
After Width: | Height: | Size: 54 KiB |
BIN
docs/ja/_snippets/images/cmek2.png
Normal file
After Width: | Height: | Size: 61 KiB |
BIN
docs/ja/_snippets/images/cmek3.png
Normal file
After Width: | Height: | Size: 13 KiB |
BIN
docs/ja/_snippets/images/connect1.png
Normal file
After Width: | Height: | Size: 14 KiB |
BIN
docs/ja/_snippets/images/connect2.png
Normal file
After Width: | Height: | Size: 39 KiB |