Merge remote-tracking branch 'origin/master' into rest-catalog

Commit 9152fc9c67 by kssenii, 2024-11-19 17:58:45 +01:00
3153 changed files with 366007 additions and 8784 deletions

View File

@ -12,7 +12,7 @@ tests/ci/cancel_and_rerun_workflow_lambda/app.py
- Backward Incompatible Change
- Build/Testing/Packaging Improvement
- Documentation (changelog entry is not required)
- Critical Bug Fix (crash, LOGICAL_ERROR, data loss, RBAC)
- Critical Bug Fix (crash, data loss, RBAC)
- Bug Fix (user-visible misbehavior in an official stable release)
- CI Fix or Improvement (changelog entry is not required)
- Not for changelog (changelog entry is not required)

View File

@ -58,13 +58,8 @@ jobs:
test_name: Style check
runner_type: style-checker-aarch64
run_command: |
python3 style_check.py
python3 style_check.py --no-push
data: ${{ needs.RunConfig.outputs.data }}
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
FastTest:
needs: [RunConfig, BuildDockers]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}

View File

@ -79,10 +79,7 @@ jobs:
python3 style_check.py
data: ${{ needs.RunConfig.outputs.data }}
secrets:
secret_envs: |
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
RCSK
robot_git_token: ${{secrets.ROBOT_CLICKHOUSE_SSH_KEY}}
FastTest:
needs: [RunConfig, BuildDockers, StyleCheck]
if: ${{ !failure() && !cancelled() && contains(fromJson(needs.RunConfig.outputs.data).jobs_data.jobs_to_do, 'Fast test') }}

View File

@ -34,8 +34,11 @@ name: Build ClickHouse
description: additional ENV variables to setup the job
type: string
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
jobs:
@ -58,10 +61,18 @@ jobs:
run: |
cat >> "$GITHUB_ENV" << 'EOF'
${{inputs.additional_envs}}
${{secrets.secret_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.robot_git_token}}
RCSK
CI_DB_URL<<CIDBU
${{ secrets.ci_db_url }}
CIDBU
CI_DB_PASSWORD<<CIDBP
${{ secrets.ci_db_password }}
CIDBP
EOF
python3 "$GITHUB_WORKSPACE"/tests/ci/ci_config.py --build-name "${{inputs.build_name}}" >> "$GITHUB_ENV"
- name: Apply sparse checkout for contrib # in order to check that it doesn't break build

View File

@ -18,8 +18,11 @@ name: BuildStageWF
type: string
required: true
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
jobs:
@ -39,4 +42,6 @@ jobs:
checkout_depth: 0
data: ${{ inputs.data }}
secrets:
secret_envs: ${{ secrets.secret_envs }}
robot_git_token: ${{ secrets.robot_git_token }}
ci_db_url: ${{ secrets.ci_db_url }}
ci_db_password: ${{ secrets.ci_db_password }}

View File

@ -45,8 +45,11 @@ name: Simple job
type: boolean
default: false
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
@ -77,7 +80,15 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.robot_git_token}}
RCSK
CI_DB_URL<<CIDBU
${{ secrets.ci_db_url }}
CIDBU
CI_DB_PASSWORD<<CIDBP
${{ secrets.ci_db_password }}
CIDBP
EOF
- name: Common setup
uses: ./.github/actions/common_setup

View File

@ -40,8 +40,11 @@ name: Testing workflow
type: string
default: "$GITHUB_WORKSPACE/tests/ci"
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
@ -75,10 +78,18 @@ jobs:
cat >> "$GITHUB_ENV" << 'EOF'
CHECK_NAME=${{ inputs.test_name }}
${{inputs.additional_envs}}
${{secrets.secret_envs}}
DOCKER_TAG<<DOCKER_JSON
${{ toJson(fromJson(inputs.data).docker_data.images) }}
DOCKER_JSON
ROBOT_CLICKHOUSE_SSH_KEY<<RCSK
${{secrets.robot_git_token}}
RCSK
CI_DB_URL<<CIDBU
${{ secrets.ci_db_url }}
CIDBU
CI_DB_PASSWORD<<CIDBP
${{ secrets.ci_db_password }}
CIDBP
EOF
- name: Common setup
uses: ./.github/actions/common_setup

View File

@ -15,8 +15,11 @@ name: StageWF
type: string
required: true
secrets:
secret_envs:
description: if given, it's passed to the environments
robot_git_token:
required: false
ci_db_url:
required: false
ci_db_password:
required: false
jobs:
@ -32,4 +35,6 @@ jobs:
runner_type: ${{ matrix.job_name_and_runner_type.runner_type }}
data: ${{ inputs.data }}
secrets:
secret_envs: ${{ secrets.secret_envs }}
robot_git_token: ${{ secrets.robot_git_token }}
ci_db_url: ${{ secrets.ci_db_url }}
ci_db_password: ${{ secrets.ci_db_password }}

View File

@ -488,6 +488,7 @@
* Remove `is_deterministic` field from the `system.functions` table. [#66630](https://github.com/ClickHouse/ClickHouse/pull/66630) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Function `tuple` will now try to construct named tuples in query (controlled by `enable_named_columns_in_function_tuple`). Introduce function `tupleNames` to extract names from tuples. [#54881](https://github.com/ClickHouse/ClickHouse/pull/54881) ([Amos Bird](https://github.com/amosbird)).
* Change how deduplication for Materialized Views works. Fixed a lot of cases, such as: on the destination table, data split into 2 or more blocks was considered a duplicate when those blocks were inserted in parallel; on the MV destination table, equal blocks were deduplicated (this happens when an MV produces equal data for different input data due to aggregation); on the MV destination table, equal blocks coming from different MVs were deduplicated. [#61601](https://github.com/ClickHouse/ClickHouse/pull/61601) ([Sema Checherinda](https://github.com/CheSema)).
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
#### New Feature
* Add `ASOF JOIN` support for `full_sorting_join` algorithm. [#55051](https://github.com/ClickHouse/ClickHouse/pull/55051) ([vdimir](https://github.com/vdimir)).
@ -599,7 +600,6 @@
* Functions `bitTest`, `bitTestAll`, and `bitTestAny` now return an error if the specified bit index is out-of-bounds [#65818](https://github.com/ClickHouse/ClickHouse/pull/65818) ([Pablo Marcos](https://github.com/pamarcos)).
* Setting `join_any_take_last_row` is supported in any query with hash join. [#65820](https://github.com/ClickHouse/ClickHouse/pull/65820) ([vdimir](https://github.com/vdimir)).
* Better handling of join conditions involving `IS NULL` checks (for example `ON (a = b AND (a IS NOT NULL) AND (b IS NOT NULL) ) OR ( (a IS NULL) AND (b IS NULL) )` is rewritten to `ON a <=> b`); fixes an incorrect optimization when conditions other than `IS NULL` are present. [#65835](https://github.com/ClickHouse/ClickHouse/pull/65835) ([vdimir](https://github.com/vdimir)).
* Functions `bitShiftLeft` and `bitShiftRight` return an error for out-of-bounds shift positions [#65838](https://github.com/ClickHouse/ClickHouse/pull/65838) ([Pablo Marcos](https://github.com/pamarcos)).
* Fix growing memory usage in S3Queue. [#65839](https://github.com/ClickHouse/ClickHouse/pull/65839) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Fix tie handling in `arrayAUC` to match sklearn. [#65840](https://github.com/ClickHouse/ClickHouse/pull/65840) ([gabrielmcg44](https://github.com/gabrielmcg44)).
* Fix possible issues with MySQL server protocol TLS connections. [#65917](https://github.com/ClickHouse/ClickHouse/pull/65917) ([Azat Khuzhin](https://github.com/azat)).

View File

@ -27,6 +27,7 @@ curl https://clickhouse.com/ | sh
* [YouTube channel](https://www.youtube.com/c/ClickHouseDB) has a lot of content about ClickHouse in video format.
* [Slack](https://clickhouse.com/slack) and [Telegram](https://telegram.me/clickhouse_en) allow chatting with ClickHouse users in real-time.
* [Blog](https://clickhouse.com/blog/) contains various ClickHouse-related articles, as well as announcements and reports about events.
* [Bluesky](https://bsky.app/profile/clickhouse.com) and [X](https://x.com/ClickHouseDB) for short news.
* [Code Browser (github.dev)](https://github.dev/ClickHouse/ClickHouse) with syntax highlighting, powered by github.dev.
* [Contacts](https://clickhouse.com/company/contact) can help to get your questions answered if there are any.
@ -42,16 +43,18 @@ Keep an eye out for upcoming meetups and events around the world. Somewhere else
Upcoming meetups
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
* [Ghent Meetup](https://www.meetup.com/clickhouse-belgium-user-group/events/303049405/) - November 19
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - November 21
* [Paris Meetup](https://www.meetup.com/clickhouse-france-user-group/events/303096434) - November 26
* [Amsterdam Meetup](https://www.meetup.com/clickhouse-netherlands-user-group/events/303638814) - December 3
* [Stockholm Meetup](https://www.meetup.com/clickhouse-stockholm-user-group/events/304382411) - December 9
* [New York Meetup](https://www.meetup.com/clickhouse-new-york-user-group/events/304268174) - December 9
* [Kuala Lumpur Meetup](https://www.meetup.com/clickhouse-malaysia-meetup-group/events/304576472/) - December 11
* [San Francisco Meetup](https://www.meetup.com/clickhouse-silicon-valley-meetup-group/events/304286951/) - December 12
* [Dubai Meetup](https://www.meetup.com/clickhouse-dubai-meetup-group/events/303096989/) - February 3
Recently completed meetups
* [Barcelona Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096876/) - November 12
* [Madrid Meetup](https://www.meetup.com/clickhouse-spain-user-group/events/303096564/) - October 22
* [Singapore Meetup](https://www.meetup.com/clickhouse-singapore-meetup-group/events/303212064/) - October 3
* [Jakarta Meetup](https://www.meetup.com/clickhouse-indonesia-user-group/events/303191359/) - October 1

base/base/BFloat16.h (new file, 313 lines)
View File

@ -0,0 +1,313 @@
#pragma once
#include <bit>
#include <base/types.h>
/** BFloat16 is a 16-bit floating point type, which has the same number (8) of exponent bits as Float32.
* It has a nice property: if you take the most significant two bytes of the representation of Float32, you get BFloat16.
* It is different from the IEEE Float16 (half precision) data type, which has fewer exponent bits and more mantissa bits.
*
* It is popular in AI applications, such as running quantized models and doing vector search,
* where the range of the data type is more important than its precision.
*
* It also recently gained good hardware support in GPUs, as well as in x86-64 and AArch64 CPUs, including SIMD instructions.
* But it is rarely utilized by compilers.
*
* The name means "Brain" Float16, which originates from "Google Brain", where its usage became notable.
* It is also known under the name "bf16". You can call it either way, but it is crucial not to confuse it with Float16.
* Here is a manual implementation of this data type. Only the required operations are implemented.
* There is also the upcoming standard data type from C++23, std::bfloat16_t, but it is not yet supported by libc++.
* There is also the compiler's built-in data type, __bf16, but clang does not compile all operations with it,
* sometimes giving an "invalid function call" error (which indicates a sketchy implementation)
* and giving errors during the instruction-selection pass of link-time optimization.
*
* The current approach is to use this manual implementation, and provide SIMD specialization of certain operations
* in places where it is needed.
*/
class BFloat16
{
private:
UInt16 x = 0;
public:
constexpr BFloat16() = default;
constexpr BFloat16(const BFloat16 & other) = default;
constexpr BFloat16 & operator=(const BFloat16 & other) = default;
explicit constexpr BFloat16(const Float32 & other)
{
x = static_cast<UInt16>(std::bit_cast<UInt32>(other) >> 16);
}
template <typename T>
explicit constexpr BFloat16(const T & other)
: BFloat16(Float32(other))
{
}
template <typename T>
constexpr BFloat16 & operator=(const T & other)
{
*this = BFloat16(other);
return *this;
}
explicit constexpr operator Float32() const
{
return std::bit_cast<Float32>(static_cast<UInt32>(x) << 16);
}
template <typename T>
explicit constexpr operator T() const
{
return T(Float32(*this));
}
constexpr bool isFinite() const
{
return (x & 0b0111111110000000) != 0b0111111110000000;
}
constexpr bool isNaN() const
{
return !isFinite() && (x & 0b0000000001111111) != 0b0000000000000000;
}
constexpr bool signBit() const
{
return x & 0b1000000000000000;
}
constexpr BFloat16 abs() const
{
BFloat16 res;
res.x = x & 0b0111111111111111; /// clear the sign bit
return res;
}
constexpr bool operator==(const BFloat16 & other) const
{
return x == other.x;
}
constexpr bool operator!=(const BFloat16 & other) const
{
return x != other.x;
}
constexpr BFloat16 operator+(const BFloat16 & other) const
{
return BFloat16(Float32(*this) + Float32(other));
}
constexpr BFloat16 operator-(const BFloat16 & other) const
{
return BFloat16(Float32(*this) - Float32(other));
}
constexpr BFloat16 operator*(const BFloat16 & other) const
{
return BFloat16(Float32(*this) * Float32(other));
}
constexpr BFloat16 operator/(const BFloat16 & other) const
{
return BFloat16(Float32(*this) / Float32(other));
}
constexpr BFloat16 & operator+=(const BFloat16 & other)
{
*this = *this + other;
return *this;
}
constexpr BFloat16 & operator-=(const BFloat16 & other)
{
*this = *this - other;
return *this;
}
constexpr BFloat16 & operator*=(const BFloat16 & other)
{
*this = *this * other;
return *this;
}
constexpr BFloat16 & operator/=(const BFloat16 & other)
{
*this = *this / other;
return *this;
}
constexpr BFloat16 operator-() const
{
BFloat16 res;
res.x = x ^ 0b1000000000000000;
return res;
}
};
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator==(const BFloat16 & a, const T & b)
{
return Float32(a) == b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator==(const T & a, const BFloat16 & b)
{
return a == Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator!=(const BFloat16 & a, const T & b)
{
return Float32(a) != b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator!=(const T & a, const BFloat16 & b)
{
return a != Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<(const BFloat16 & a, const T & b)
{
return Float32(a) < b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<(const T & a, const BFloat16 & b)
{
return a < Float32(b);
}
constexpr inline bool operator<(BFloat16 a, BFloat16 b)
{
return Float32(a) < Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>(const BFloat16 & a, const T & b)
{
return Float32(a) > b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>(const T & a, const BFloat16 & b)
{
return a > Float32(b);
}
constexpr inline bool operator>(BFloat16 a, BFloat16 b)
{
return Float32(a) > Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<=(const BFloat16 & a, const T & b)
{
return Float32(a) <= b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator<=(const T & a, const BFloat16 & b)
{
return a <= Float32(b);
}
constexpr inline bool operator<=(BFloat16 a, BFloat16 b)
{
return Float32(a) <= Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>=(const BFloat16 & a, const T & b)
{
return Float32(a) >= b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr bool operator>=(const T & a, const BFloat16 & b)
{
return a >= Float32(b);
}
constexpr inline bool operator>=(BFloat16 a, BFloat16 b)
{
return Float32(a) >= Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator+(T a, BFloat16 b)
{
return a + Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator+(BFloat16 a, T b)
{
return Float32(a) + b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator-(T a, BFloat16 b)
{
return a - Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator-(BFloat16 a, T b)
{
return Float32(a) - b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator*(T a, BFloat16 b)
{
return a * Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator*(BFloat16 a, T b)
{
return Float32(a) * b;
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator/(T a, BFloat16 b)
{
return a / Float32(b);
}
template <typename T>
requires(!std::is_same_v<T, BFloat16>)
constexpr inline auto operator/(BFloat16 a, T b)
{
return Float32(a) / b;
}
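
The conversion path above always goes through Float32, so the truncation property from the header comment can be sanity-checked with a tiny standalone sketch (hypothetical test code, not part of this file; it only assumes C++20 `std::bit_cast`):

```cpp
#include <bit>
#include <cstdint>
#include <cstdio>

int main()
{
    float f = 3.14159f;
    // Encode: BFloat16 is the most significant two bytes of the Float32 representation.
    uint16_t bf = static_cast<uint16_t>(std::bit_cast<uint32_t>(f) >> 16);
    // Decode: pad the low 16 bits with zeros to get a Float32 back.
    float restored = std::bit_cast<float>(static_cast<uint32_t>(bf) << 16);
    std::printf("%.6f -> %.6f\n", f, restored); // 3.141590 -> 3.140625: only ~2-3 decimal digits survive
}
```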

View File

@ -10,6 +10,15 @@
template <typename T> struct FloatTraits;
template <>
struct FloatTraits<BFloat16>
{
using UInt = uint16_t;
static constexpr size_t bits = 16;
static constexpr size_t exponent_bits = 8;
static constexpr size_t mantissa_bits = bits - exponent_bits - 1;
};
template <>
struct FloatTraits<float>
{
@ -87,6 +96,15 @@ struct DecomposedFloat
&& ((mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0));
}
bool isFinite() const
{
return exponent() != ((1ull << Traits::exponent_bits) - 1);
}
bool isNaN() const
{
return !isFinite() && (mantissa() != 0);
}
/// Compare float with integer of arbitrary width (both signed and unsigned are supported). Assuming two's complement arithmetic.
/// This function is generic, big integers (128, 256 bit) are supported as well.
@ -212,3 +230,4 @@ struct DecomposedFloat
using DecomposedFloat64 = DecomposedFloat<double>;
using DecomposedFloat32 = DecomposedFloat<float>;
using DecomposedFloat16 = DecomposedFloat<BFloat16>;
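
The new `FloatTraits<BFloat16>` values can be cross-checked against the layout they describe: with 8 exponent bits in a 16-bit pattern, an all-ones exponent means Inf or NaN, which is exactly what `isFinite` tests. A minimal sketch (the bit patterns below are assumptions derived by truncating the corresponding Float32 encodings):

```cpp
#include <cstdint>

// 8 exponent bits sit just below the sign bit of the 16-bit pattern.
constexpr uint16_t exponent_mask = ((1u << 8) - 1) << (16 - 8 - 1); // 0b0111111110000000

constexpr bool bf16_is_finite(uint16_t x) { return (x & exponent_mask) != exponent_mask; }

static_assert(bf16_is_finite(0x3F80));  // 1.0f truncated to BFloat16
static_assert(!bf16_is_finite(0x7F80)); // +Inf: all-ones exponent, zero mantissa
static_assert(!bf16_is_finite(0x7FC0)); // quiet NaN: all-ones exponent, non-zero mantissa
```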

View File

@ -4,7 +4,7 @@
#include <fmt/format.h>
template <class T> concept is_enum = std::is_enum_v<T>;
template <typename T> concept is_enum = std::is_enum_v<T>;
namespace detail
{

View File

@ -9,10 +9,11 @@ namespace DB
{
using TypeListNativeInt = TypeList<UInt8, UInt16, UInt32, UInt64, Int8, Int16, Int32, Int64>;
using TypeListFloat = TypeList<Float32, Float64>;
using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListFloat>;
using TypeListNativeFloat = TypeList<Float32, Float64>;
using TypeListNativeNumber = TypeListConcat<TypeListNativeInt, TypeListNativeFloat>;
using TypeListWideInt = TypeList<UInt128, Int128, UInt256, Int256>;
using TypeListInt = TypeListConcat<TypeListNativeInt, TypeListWideInt>;
using TypeListFloat = TypeListConcat<TypeListNativeFloat, TypeList<BFloat16>>;
using TypeListIntAndFloat = TypeListConcat<TypeListInt, TypeListFloat>;
using TypeListDecimal = TypeList<Decimal32, Decimal64, Decimal128, Decimal256>;
using TypeListNumber = TypeListConcat<TypeListIntAndFloat, TypeListDecimal>;
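
The reshuffle keeps `TypeListNativeFloat` as the two hardware-native float types, while `TypeListFloat` now also carries `BFloat16`. A sketch of what the aliases expand to (assuming `TypeListConcat` flattens its arguments into a single `TypeList`):

```cpp
static_assert(std::is_same_v<TypeListNativeFloat, TypeList<Float32, Float64>>);
static_assert(std::is_same_v<TypeListFloat, TypeList<Float32, Float64, BFloat16>>);
```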

View File

@ -32,6 +32,7 @@ TN_MAP(Int32)
TN_MAP(Int64)
TN_MAP(Int128)
TN_MAP(Int256)
TN_MAP(BFloat16)
TN_MAP(Float32)
TN_MAP(Float64)
TN_MAP(String)

View File

@ -145,6 +145,7 @@
#define TSA_TRY_ACQUIRE_SHARED(...) __attribute__((try_acquire_shared_capability(__VA_ARGS__))) /// function tries to acquire a shared capability and returns a boolean value indicating success or failure
#define TSA_RELEASE_SHARED(...) __attribute__((release_shared_capability(__VA_ARGS__))) /// function releases the given shared capability
#define TSA_SCOPED_LOCKABLE __attribute__((scoped_lockable)) /// object of a class has scoped lockable capability
#define TSA_RETURN_CAPABILITY(...) __attribute__((lock_returned(__VA_ARGS__))) /// to return capabilities in functions
/// Macros for suppressing TSA warnings for specific reads/writes (instead of suppressing it for the whole function)
/// They use a lambda function to apply a function attribute to a single statement. This enables us to suppress warnings locally instead of
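
A hedged sketch of how the new `TSA_RETURN_CAPABILITY` macro is typically used under Clang's `-Wthread-safety` analysis (the `Mutex` and `Resource` names are illustrative, not from the diff; it assumes the exclusive-capability `TSA_*` macros defined alongside the shared variants shown above):

```cpp
class TSA_CAPABILITY("mutex") Mutex
{
public:
    void lock() TSA_ACQUIRE();
    void unlock() TSA_RELEASE();
};

class Resource
{
    Mutex mutex;
    int value TSA_GUARDED_BY(mutex) = 0;

public:
    /// Tells the analysis that the returned reference is the `mutex` capability,
    /// so callers can lock it and then access `value` without a false positive.
    Mutex & getMutex() TSA_RETURN_CAPABILITY(mutex) { return mutex; }
};
```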

View File

@ -4,6 +4,8 @@
#include <base/types.h>
#include <base/wide_integer.h>
#include <base/BFloat16.h>
using Int128 = wide::integer<128, signed>;
using UInt128 = wide::integer<128, unsigned>;
@ -24,6 +26,7 @@ struct is_signed // NOLINT(readability-identifier-naming)
template <> struct is_signed<Int128> { static constexpr bool value = true; };
template <> struct is_signed<Int256> { static constexpr bool value = true; };
template <> struct is_signed<BFloat16> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_signed_v = is_signed<T>::value;
@ -40,15 +43,13 @@ template <> struct is_unsigned<UInt256> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_unsigned_v = is_unsigned<T>::value;
template <class T> concept is_integer =
template <typename T> concept is_integer =
std::is_integral_v<T>
|| std::is_same_v<T, Int128>
|| std::is_same_v<T, UInt128>
|| std::is_same_v<T, Int256>
|| std::is_same_v<T, UInt256>;
template <class T> concept is_floating_point = std::is_floating_point_v<T>;
template <typename T>
struct is_arithmetic // NOLINT(readability-identifier-naming)
{
@ -59,11 +60,16 @@ template <> struct is_arithmetic<Int128> { static constexpr bool value = true; }
template <> struct is_arithmetic<UInt128> { static constexpr bool value = true; };
template <> struct is_arithmetic<Int256> { static constexpr bool value = true; };
template <> struct is_arithmetic<UInt256> { static constexpr bool value = true; };
template <> struct is_arithmetic<BFloat16> { static constexpr bool value = true; };
template <typename T>
inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
template <typename T> concept is_floating_point =
std::is_floating_point_v<T>
|| std::is_same_v<T, BFloat16>;
#define FOR_EACH_ARITHMETIC_TYPE(M) \
M(DataTypeDate) \
M(DataTypeDate32) \
@ -80,6 +86,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
M(DataTypeUInt128) \
M(DataTypeInt256) \
M(DataTypeUInt256) \
M(DataTypeBFloat16) \
M(DataTypeFloat32) \
M(DataTypeFloat64)
@ -99,6 +106,7 @@ inline constexpr bool is_arithmetic_v = is_arithmetic<T>::value;
M(DataTypeUInt128, X) \
M(DataTypeInt256, X) \
M(DataTypeUInt256, X) \
M(DataTypeBFloat16, X) \
M(DataTypeFloat32, X) \
M(DataTypeFloat64, X)
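
Spelled out, here is what the extended traits and the widened `is_floating_point` concept buy (a sketch assuming this header and `base/BFloat16.h` are included):

```cpp
#include <type_traits>

static_assert(is_signed_v<BFloat16>);
static_assert(is_arithmetic_v<BFloat16>);
static_assert(is_floating_point<BFloat16>);         // true via the extended concept
static_assert(!std::is_floating_point_v<BFloat16>); // but not a built-in floating point type
static_assert(is_integer<Int128>);                  // wide integers still satisfy is_integer
```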

View File

@ -43,7 +43,7 @@ namespace Net
/// Sets the following default values:
/// - timeout: 60 seconds
/// - keepAlive: true
/// - maxKeepAliveRequests: 0
/// - maxKeepAliveRequests: 100
/// - keepAliveTimeout: 15 seconds
void setServerName(const std::string & serverName);
@ -87,12 +87,12 @@ namespace Net
const Poco::Timespan & getKeepAliveTimeout() const;
/// Returns the connection timeout for HTTP connections.
void setMaxKeepAliveRequests(int maxKeepAliveRequests);
void setMaxKeepAliveRequests(size_t maxKeepAliveRequests);
/// Specifies the maximum number of requests allowed
/// during a persistent connection. 0 means unlimited
/// connections.
int getMaxKeepAliveRequests() const;
size_t getMaxKeepAliveRequests() const;
/// Returns the maximum number of requests allowed
/// during a persistent connection, or 0 if
/// unlimited connections are allowed.
@ -106,7 +106,7 @@ namespace Net
std::string _softwareVersion;
Poco::Timespan _timeout;
bool _keepAlive;
int _maxKeepAliveRequests;
size_t _maxKeepAliveRequests;
Poco::Timespan _keepAliveTimeout;
};
@ -138,7 +138,7 @@ namespace Net
}
inline int HTTPServerParams::getMaxKeepAliveRequests() const
inline size_t HTTPServerParams::getMaxKeepAliveRequests() const
{
return _maxKeepAliveRequests;
}

View File

@ -65,7 +65,7 @@ namespace Net
private:
bool _firstRequest;
Poco::Timespan _keepAliveTimeout;
int _maxKeepAliveRequests;
size_t _maxKeepAliveRequests;
};
@ -74,7 +74,7 @@ namespace Net
//
inline bool HTTPServerSession::canKeepAlive() const
{
return _maxKeepAliveRequests != 0;
return getKeepAlive() && _maxKeepAliveRequests > 0;
}

View File

@ -22,7 +22,7 @@ namespace Net {
HTTPServerParams::HTTPServerParams():
_timeout(60000000),
_keepAlive(true),
_maxKeepAliveRequests(0),
_maxKeepAliveRequests(100),
_keepAliveTimeout(15000000)
{
}
@ -32,12 +32,12 @@ HTTPServerParams::~HTTPServerParams()
{
}
void HTTPServerParams::setServerName(const std::string& serverName)
{
_serverName = serverName;
}
void HTTPServerParams::setSoftwareVersion(const std::string& softwareVersion)
{
@ -50,24 +50,24 @@ void HTTPServerParams::setTimeout(const Poco::Timespan& timeout)
_timeout = timeout;
}
void HTTPServerParams::setKeepAlive(bool keepAlive)
{
_keepAlive = keepAlive;
}
void HTTPServerParams::setKeepAliveTimeout(const Poco::Timespan& timeout)
{
_keepAliveTimeout = timeout;
}
void HTTPServerParams::setMaxKeepAliveRequests(int maxKeepAliveRequests)
void HTTPServerParams::setMaxKeepAliveRequests(size_t maxKeepAliveRequests)
{
poco_assert (maxKeepAliveRequests >= 0);
_maxKeepAliveRequests = maxKeepAliveRequests;
}
} } // namespace Poco::Net

View File

@ -50,14 +50,14 @@ bool HTTPServerSession::hasMoreRequests()
--_maxKeepAliveRequests;
return socket().poll(getTimeout(), Socket::SELECT_READ);
}
else if (_maxKeepAliveRequests != 0 && getKeepAlive())
else if (canKeepAlive())
{
if (_maxKeepAliveRequests > 0)
--_maxKeepAliveRequests;
return buffered() > 0 || socket().poll(_keepAliveTimeout, Socket::SELECT_READ);
}
else
return false;
else
return false;
}
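
The rewritten control flow is easier to see outside the diff. A simplified model of the new keep-alive accounting (the struct and method names are illustrative; the default follows the new `HTTPServerParams` value):

```cpp
#include <cstddef>

struct KeepAliveBudget
{
    bool keep_alive = true;         // HTTPServerParams::getKeepAlive()
    std::size_t max_requests = 100; // new default, instead of 0 meaning "unlimited"

    // canKeepAlive() now requires both the flag and a remaining budget;
    // with size_t the counter can no longer be driven negative.
    bool canKeepAlive() const { return keep_alive && max_requests > 0; }

    bool admitNextRequest()
    {
        if (!canKeepAlive())
            return false;
        --max_requests;
        return true;
    }
};
```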

View File

@ -18,7 +18,6 @@
using Poco::Exception;
using Poco::ErrorHandler;
namespace Poco {
@ -31,9 +30,7 @@ TCPServerConnection::TCPServerConnection(const StreamSocket& socket):
}
TCPServerConnection::~TCPServerConnection()
{
}
TCPServerConnection::~TCPServerConnection() = default;
void TCPServerConnection::start()

View File

@ -3131,3 +3131,4 @@ DistributedCachePoolBehaviourOnLimit
SharedJoin
ShareSet
unacked
BFloat

View File

@ -74,6 +74,7 @@ elseif (ARCH_AARCH64)
# introduced as optional, either in v8.2 [7] or in v8.4 [8].
# rcpc: Load-Acquire RCpc Register. Better support of release/acquire of atomics. Good for allocators and high contention code.
# Optional in v8.2, mandatory in v8.3 [9]. Supported in Graviton >=2, Azure and GCP instances.
# bf16: Bfloat16, a half-precision floating point format developed by Google Brain. Optional in v8.2, mandatory in v8.6.
#
# [1] https://github.com/aws/aws-graviton-getting-started/blob/main/c-c%2B%2B.md
# [2] https://community.arm.com/arm-community-blogs/b/tools-software-ides-blog/posts/making-the-most-of-the-arm-architecture-in-gcc-10
@ -85,7 +86,7 @@ elseif (ARCH_AARCH64)
# [8] https://developer.arm.com/documentation/102651/a/What-are-dot-product-intructions-
# [9] https://developer.arm.com/documentation/dui0801/g/A64-Data-Transfer-Instructions/LDAPR?lang=en
# [10] https://github.com/aws/aws-graviton-getting-started/blob/main/README.md
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc")
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=armv8.2-a+simd+crypto+dotprod+ssbs+rcpc+bf16")
endif ()
# Best-effort check: The build generates and executes intermediate binaries, e.g. protoc and llvm-tablegen. If we build on ARM for ARM
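
When the `+bf16` target feature is enabled, toolchains advertise it through an ACLE feature-test macro, which code can probe at compile time. A hedged sketch (assuming Clang/GCC on AArch64; the macro name comes from the Arm C Language Extensions):

```cpp
#if defined(__ARM_FEATURE_BF16)
// The compiler may emit bf16 instructions (e.g. BFDOT) for this translation unit.
constexpr bool compiled_with_bf16 = true;
#else
constexpr bool compiled_with_bf16 = false;
#endif
```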

View File

@ -3,8 +3,7 @@
set (DEFAULT_LIBS "-nodefaultlibs")
# We need builtins from Clang's RT even without libcxx - for ubsan+int128.
# See https://bugs.llvm.org/show_bug.cgi?id=16404
# We need builtins from Clang
execute_process (COMMAND
${CMAKE_CXX_COMPILER} --target=${CMAKE_CXX_COMPILER_TARGET} --print-libgcc-file-name --rtlib=compiler-rt
OUTPUT_VARIABLE BUILTINS_LIBRARY

contrib/SimSIMD (vendored submodule pointer update, 2 lines)

@ -1 +1 @@
Subproject commit ee3c9c9c00b51645f62a1a9e99611b78c0052a21
Subproject commit fa60f1b8e3582c50978f0ae86c2ebb6c9af957f3

View File

@ -1,7 +1,7 @@
# The Dockerfile.ubuntu exists for the tests/ci/docker_server.py script
# If the image is built from Dockerfile.alpine, then the `-alpine` suffix is added automatically,
# so the only purpose of Dockerfile.ubuntu is to push `latest`, `head` and so on w/o suffixes
FROM ubuntu:20.04 AS glibc-donor
FROM ubuntu:22.04 AS glibc-donor
ARG TARGETARCH
RUN arch=${TARGETARCH:-amd64} \
@ -9,7 +9,11 @@ RUN arch=${TARGETARCH:-amd64} \
amd64) rarch=x86_64 ;; \
arm64) rarch=aarch64 ;; \
esac \
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu
&& ln -s "${rarch}-linux-gnu" /lib/linux-gnu \
&& case $arch in \
amd64) ln /lib/linux-gnu/ld-linux-x86-64.so.2 /lib/linux-gnu/ld-2.35.so ;; \
arm64) ln /lib/linux-gnu/ld-linux-aarch64.so.1 /lib/linux-gnu/ld-2.35.so ;; \
esac
FROM alpine
@ -20,7 +24,7 @@ ENV LANG=en_US.UTF-8 \
TZ=UTC \
CLICKHOUSE_CONFIG=/etc/clickhouse-server/config.xml
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.31.so /lib/
COPY --from=glibc-donor /lib/linux-gnu/libc.so.6 /lib/linux-gnu/libdl.so.2 /lib/linux-gnu/libm.so.6 /lib/linux-gnu/libpthread.so.0 /lib/linux-gnu/librt.so.1 /lib/linux-gnu/libnss_dns.so.2 /lib/linux-gnu/libnss_files.so.2 /lib/linux-gnu/libresolv.so.2 /lib/linux-gnu/ld-2.35.so /lib/
COPY --from=glibc-donor /etc/nsswitch.conf /etc/
COPY entrypoint.sh /entrypoint.sh
@ -34,7 +38,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.10.1.2812"
ARG VERSION="24.10.2.80"
ARG PACKAGES="clickhouse-keeper"
ARG DIRECT_DOWNLOAD_URLS=""

View File

@ -1,21 +1,31 @@
#!/bin/bash
set +x
set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated
# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as the desired user
# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
fi
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# support `docker run --user=xxx:xxxx`
if [[ "$(id -u)" = "0" ]]; then
if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
USER=0
GROUP=0
else
USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
fi
if command -v gosu &> /dev/null; then
gosu="gosu $USER:$GROUP"
elif command -v su-exec &> /dev/null; then
@ -82,11 +92,11 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
# There is a config file. It is already tested with gosu (if it is readable by the keeper user)
if [ -f "$KEEPER_CONFIG" ]; then
exec $gosu /usr/bin/clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
exec $gosu clickhouse-keeper --config-file="$KEEPER_CONFIG" "$@"
fi
# There is no config file. Will use embedded one
exec $gosu /usr/bin/clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
exec $gosu clickhouse-keeper --log-file="$LOG_PATH" --errorlog-file="$ERROR_LOG_PATH" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image

View File

@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \
# lts / testing / prestable / etc
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
ARG VERSION="24.10.1.2812"
ARG VERSION="24.10.2.80"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
ARG DIRECT_DOWNLOAD_URLS=""

View File

@ -1,4 +1,4 @@
FROM ubuntu:20.04
FROM ubuntu:22.04
# see https://github.com/moby/moby/issues/4032#issuecomment-192327844
# It could be removed after we move to a version 23.04+
@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
ARG REPO_CHANNEL="stable"
ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
ARG VERSION="24.10.1.2812"
ARG VERSION="24.10.2.80"
ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
#docker-official-library:off
@ -88,34 +88,34 @@ RUN if [ -n "${single_binary_location_url}" ]; then \
#docker-official-library:on
# A fallback to installation from ClickHouse repository
RUN if ! clickhouse local -q "SELECT ''" > /dev/null 2>&1; then \
apt-get update \
&& apt-get install --yes --no-install-recommends \
apt-transport-https \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --allow-unauthenticated --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq libksba8 \
&& apt-get autoremove -yq \
; fi
# It works unless the clickhouse binary already exists
RUN clickhouse local -q 'SELECT 1' >/dev/null 2>&1 && exit 0 || : \
; apt-get update \
&& apt-get install --yes --no-install-recommends \
dirmngr \
gnupg2 \
&& mkdir -p /etc/apt/sources.list.d \
&& GNUPGHOME=$(mktemp -d) \
&& GNUPGHOME="$GNUPGHOME" gpg --batch --no-default-keyring \
--keyring /usr/share/keyrings/clickhouse-keyring.gpg \
--keyserver hkp://keyserver.ubuntu.com:80 \
--recv-keys 3a9ea1193a97b548be1457d48919f6bd2b48d754 \
&& rm -rf "$GNUPGHOME" \
&& chmod +r /usr/share/keyrings/clickhouse-keyring.gpg \
&& echo "${REPOSITORY}" > /etc/apt/sources.list.d/clickhouse.list \
&& echo "installing from repository: ${REPOSITORY}" \
&& apt-get update \
&& for package in ${PACKAGES}; do \
packages="${packages} ${package}=${VERSION}" \
; done \
&& apt-get install --yes --no-install-recommends ${packages} || exit 1 \
&& rm -rf \
/var/lib/apt/lists/* \
/var/cache/debconf \
/tmp/* \
&& apt-get autoremove --purge -yq dirmngr gnupg2 \
&& chmod ugo+Xrw -R /etc/clickhouse-server /etc/clickhouse-client
# The last chmod is here to make the next one is No-op in docker official library Dockerfile
# post install
# we need to allow "others" access to clickhouse folder, because docker container
@ -126,8 +126,6 @@ RUN clickhouse-local -q 'SELECT * FROM system.build_options' \
RUN locale-gen en_US.UTF-8
ENV LANG en_US.UTF-8
ENV LANGUAGE en_US:en
ENV LC_ALL en_US.UTF-8
ENV TZ UTC
RUN mkdir /docker-entrypoint-initdb.d

View File

@ -1,3 +1,11 @@
<!---
The README.md is generated by README.sh from the following sources:
- README.src/content.md
- README.src/license.md
If you want to change it, edit these files
-->
# ClickHouse Server Docker Image
## What is ClickHouse?
@ -8,6 +16,7 @@ ClickHouse works 100-1000x faster than traditional database management systems,
For more information and documentation see https://clickhouse.com/.
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions
- The `latest` tag points to the latest release of the latest stable branch.
@ -16,10 +25,12 @@ For more information and documentation see https://clickhouse.com/.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`.
<!-- REMOVE UNTIL HERE -->
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. It requires Docker version >= `20.10.10`, which contains this [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround, you could use `docker run --security-opt seccomp=unconfined` instead; however, that has security implications.
## How to use this image
@ -29,7 +40,7 @@ For more information and documentation see https://clickhouse.com/.
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
By default, ClickHouse will be accessible only via the Docker network. See the [networking section below](#networking).
By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.
By default, the server instance started above runs as the `default` user, without a password.
@ -46,7 +57,7 @@ More information about the [ClickHouse client](https://clickhouse.com/docs/en/in
### connect to it using curl
```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server curlimages/curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```
More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
@ -69,7 +80,7 @@ echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
`22.6.3.35`
or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
@ -87,8 +98,8 @@ Typically you may want to mount the following folders inside your container to a
```bash
docker run -d \
-v $(realpath ./ch_data):/var/lib/clickhouse/ \
-v $(realpath ./ch_logs):/var/log/clickhouse-server/ \
-v "$PWD/ch_data:/var/lib/clickhouse/" \
-v "$PWD/ch_logs:/var/log/clickhouse-server/" \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
@ -110,6 +121,8 @@ docker run -d \
--name some-clickhouse-server --ulimit nofile=262144:262144 clickhouse/clickhouse-server
```
Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
## Configuration
The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
@ -125,8 +138,8 @@ docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /pa
### Start server as custom user
```bash
# $(pwd)/data/clickhouse should exist and be owned by current user
docker run --rm --user ${UID}:${GID} --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
# $PWD/data/clickhouse should exist and be owned by current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.
@ -134,7 +147,7 @@ When you use the image with local directories mounted, you probably want to spec
### Start server from root (useful in case of enabled user namespace)
```bash
docker run --rm -e CLICKHOUSE_UID=0 -e CLICKHOUSE_GID=0 --name clickhouse-server-userns -v "$(pwd)/logs/clickhouse:/var/log/clickhouse-server" -v "$(pwd)/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" clickhouse/clickhouse-server
```
### How to create default database and user on starting

docker/server/README.sh (new executable file, 38 lines)
View File

@ -0,0 +1,38 @@
#!/usr/bin/env bash
set -ueo pipefail
# A script to generate README.md close to how it is done in https://github.com/docker-library/docs
WORKDIR=$(dirname "$0")
SCRIPT_NAME=$(basename "$0")
CONTENT=README.src/content.md
LICENSE=README.src/license.md
cd "$WORKDIR"
R=README.md
cat > "$R" <<EOD
<!---
The $R is generated by $SCRIPT_NAME from the following sources:
- $CONTENT
- $LICENSE
If you want to change it, edit these files
-->
EOD
cat "$CONTENT" >> "$R"
cat >> "$R" <<EOD
## License
$(cat $LICENSE)
EOD
# Remove %%LOGO%% from the file, together with the line that follows it
sed -i '/^%%LOGO%%/,+1d' "$R"
# Replace each %%IMAGE%% with our `clickhouse/clickhouse-server`
sed -i '/%%IMAGE%%/s:%%IMAGE%%:clickhouse/clickhouse-server:g' $R

View File

@ -0,0 +1 @@
ClickHouse is the fastest and most resource-efficient OSS database for real-time apps and analytics.

View File

@ -0,0 +1,170 @@
# ClickHouse Server Docker Image
## What is ClickHouse?
%%LOGO%%
ClickHouse is an open-source column-oriented DBMS (columnar database management system) for online analytical processing (OLAP) that allows users to generate analytical reports using SQL queries in real-time.
ClickHouse works 100-1000x faster than traditional database management systems, and processes hundreds of millions to over a billion rows and tens of gigabytes of data per server per second. With a widespread user base around the globe, the technology has received praise for its reliability, ease of use, and fault tolerance.
For more information and documentation see https://clickhouse.com/.
<!-- This is not related to the docker official library, remove it before commit to https://github.com/docker-library/docs -->
## Versions
- The `latest` tag points to the latest release of the latest stable branch.
- Branch tags like `22.2` point to the latest release of the corresponding branch.
- Full version tags like `22.2.3.5` point to the corresponding release.
- The tag `head` is built from the latest commit to the default branch.
- Each tag has optional `-alpine` suffix to reflect that it's built on top of `alpine`.
<!-- REMOVE UNTIL HERE -->
### Compatibility
- The amd64 image requires support for [SSE3 instructions](https://en.wikipedia.org/wiki/SSE3). Virtually all x86 CPUs after 2005 support SSE3.
- The arm64 image requires support for the [ARMv8.2-A architecture](https://en.wikipedia.org/wiki/AArch64#ARMv8.2-A) and additionally the Load-Acquire RCpc register. The register is optional in version ARMv8.2-A and mandatory in [ARMv8.3-A](https://en.wikipedia.org/wiki/AArch64#ARMv8.3-A). Supported in Graviton >=2, Azure and GCP instances. Examples for unsupported devices are Raspberry Pi 4 (ARMv8.0-A) and Jetson AGX Xavier/Orin (ARMv8.2-A).
- Since ClickHouse 24.11, the Ubuntu images use `ubuntu:22.04` as their base image. It requires Docker version >= `20.10.10`, which contains this [patch](https://github.com/moby/moby/commit/977283509f75303bc6612665a04abf76ff1d2468). As a workaround, you could use `docker run --security-opt seccomp=unconfined` instead; however, that has security implications.
## How to use this image
### start server instance
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
By default, ClickHouse will be accessible only via the Docker network. See the **networking** section below.
By default, the server instance started above runs as the `default` user, without a password.
### connect to it from a native client
```bash
docker run -it --rm --link some-clickhouse-server:clickhouse-server --entrypoint clickhouse-client %%IMAGE%% --host clickhouse-server
# OR
docker exec -it some-clickhouse-server clickhouse-client
```
More information about the [ClickHouse client](https://clickhouse.com/docs/en/interfaces/cli/).
### connect to it using curl
```bash
echo "SELECT 'Hello, ClickHouse!'" | docker run -i --rm --link some-clickhouse-server:clickhouse-server buildpack-deps:curl curl 'http://clickhouse-server:8123/?query=' -s --data-binary @-
```
More information about the [ClickHouse HTTP Interface](https://clickhouse.com/docs/en/interfaces/http/).
### stopping / removing the container
```bash
docker stop some-clickhouse-server
docker rm some-clickhouse-server
```
### networking
You can expose your ClickHouse running in docker by [mapping a particular port](https://docs.docker.com/config/containers/container-networking/) from inside the container using host ports:
```bash
docker run -d -p 18123:8123 -p 19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:18123/' --data-binary @-
```
`22.6.3.35`
Or by allowing the container to use [host ports directly](https://docs.docker.com/network/host/) using `--network=host` (also allows achieving better network performance):
```bash
docker run -d --network=host --name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
echo 'SELECT version()' | curl 'http://localhost:8123/' --data-binary @-
```
`22.6.3.35`
### Volumes
Typically you may want to mount the following folders inside your container to achieve persistency:
- `/var/lib/clickhouse/` - main folder where ClickHouse stores the data
- `/var/log/clickhouse-server/` - logs
```bash
docker run -d \
-v "$PWD/ch_data:/var/lib/clickhouse/" \
-v "$PWD/ch_logs:/var/log/clickhouse-server/" \
--name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
You may also want to mount:
- `/etc/clickhouse-server/config.d/*.xml` - files with server configuration adjustments
- `/etc/clickhouse-server/users.d/*.xml` - files with user settings adjustments
- `/docker-entrypoint-initdb.d/` - folder with database initialization scripts (see below).
### Linux capabilities
ClickHouse has some advanced functionality, which requires enabling several [Linux capabilities](https://man7.org/linux/man-pages/man7/capabilities.7.html).
They are optional and can be enabled using the following [docker command-line arguments](https://docs.docker.com/engine/reference/run/#runtime-privilege-and-linux-capabilities):
```bash
docker run -d \
--cap-add=SYS_NICE --cap-add=NET_ADMIN --cap-add=IPC_LOCK \
--name some-clickhouse-server --ulimit nofile=262144:262144 %%IMAGE%%
```
Read more in [knowledge base](https://clickhouse.com/docs/knowledgebase/configure_cap_ipc_lock_and_cap_sys_nice_in_docker).
## Configuration
The container exposes port 8123 for the [HTTP interface](https://clickhouse.com/docs/en/interfaces/http_interface/) and port 9000 for the [native client](https://clickhouse.com/docs/en/interfaces/tcp/).
ClickHouse configuration is represented with a file "config.xml" ([documentation](https://clickhouse.com/docs/en/operations/configuration_files/))
### Start server instance with custom configuration
```bash
docker run -d --name some-clickhouse-server --ulimit nofile=262144:262144 -v /path/to/your/config.xml:/etc/clickhouse-server/config.xml %%IMAGE%%
```
### Start server as custom user
```bash
# $PWD/data/clickhouse should exist and be owned by current user
docker run --rm --user "${UID}:${GID}" --name some-clickhouse-server --ulimit nofile=262144:262144 -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```
When you use the image with local directories mounted, you probably want to specify the user to maintain the proper file ownership. Use the `--user` argument and mount `/var/lib/clickhouse` and `/var/log/clickhouse-server` inside the container. Otherwise, the image will complain and not start.
### Start server from root (useful in case of enabled user namespace)
```bash
docker run --rm -e CLICKHOUSE_RUN_AS_ROOT=1 --name clickhouse-server-userns -v "$PWD/logs/clickhouse:/var/log/clickhouse-server" -v "$PWD/data/clickhouse:/var/lib/clickhouse" %%IMAGE%%
```
### How to create default database and user on starting
Sometimes you may want to create a user (the user named `default` is used by default) and a database on container start. You can do it using the environment variables `CLICKHOUSE_DB`, `CLICKHOUSE_USER`, `CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT` and `CLICKHOUSE_PASSWORD`:
```bash
docker run --rm -e CLICKHOUSE_DB=my_database -e CLICKHOUSE_USER=username -e CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT=1 -e CLICKHOUSE_PASSWORD=password -p 9000:9000/tcp %%IMAGE%%
```
## How to extend this image
To perform additional initialization in an image derived from this one, add one or more `*.sql`, `*.sql.gz`, or `*.sh` scripts under `/docker-entrypoint-initdb.d`. After the entrypoint calls `initdb`, it will run any `*.sql` files, run any executable `*.sh` scripts, and source any non-executable `*.sh` scripts found in that directory to do further initialization before starting the service.
Also, you can provide environment variables `CLICKHOUSE_USER` & `CLICKHOUSE_PASSWORD` that will be used for clickhouse-client during initialization.
For example, to add an additional user and database, add the following to `/docker-entrypoint-initdb.d/init-db.sh`:
```bash
#!/bin/bash
set -e
clickhouse client -n <<-EOSQL
CREATE DATABASE docker;
CREATE TABLE docker.docker (x Int32) ENGINE = Log;
EOSQL
```

View File

@ -0,0 +1 @@
https://github.com/ClickHouse/ClickHouse

View File

@ -0,0 +1 @@
View [license information](https://github.com/ClickHouse/ClickHouse/blob/master/LICENSE) for the software contained in this image.

View File

@ -0,0 +1,43 @@
<?xml version="1.0" encoding="UTF-8"?>
<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" viewBox="0 0 616 616">
<defs>
<style>
.cls-1 {
clip-path: url(#clippath);
}
.cls-2 {
fill: none;
}
.cls-2, .cls-3, .cls-4 {
stroke-width: 0px;
}
.cls-3 {
fill: #1e1e1e;
}
.cls-4 {
fill: #faff69;
}
</style>
<clipPath id="clippath">
<rect class="cls-2" x="83.23" y="71.73" width="472.55" height="472.55"/>
</clipPath>
</defs>
<g id="Layer_2" data-name="Layer 2">
<rect class="cls-4" width="616" height="616"/>
</g>
<g id="Layer_1" data-name="Layer 1">
<g class="cls-1">
<g>
<path class="cls-3" d="m120.14,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m208.75,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m297.35,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m385.94,113.3c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.66,2.09,4.66,4.66v389.38c0,2.57-2.09,4.66-4.66,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66V113.3Z"/>
<path class="cls-3" d="m474.56,268.36c0-2.57,2.09-4.66,4.66-4.66h34.98c2.57,0,4.65,2.09,4.65,4.66v79.28c0,2.57-2.09,4.66-4.65,4.66h-34.98c-2.57,0-4.66-2.09-4.66-4.66v-79.28Z"/>
</g>
</g>
</g>
</svg>


View File

@ -0,0 +1 @@
[ClickHouse Inc.](%%GITHUB-REPO%%)

View File

@ -0,0 +1,7 @@
{
"hub": {
"categories": [
"databases-and-storage"
]
}
}

View File

@ -4,17 +4,28 @@ set -eo pipefail
shopt -s nullglob
DO_CHOWN=1
if [ "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]; then
if [[ "${CLICKHOUSE_RUN_AS_ROOT:=0}" = "1" || "${CLICKHOUSE_DO_NOT_CHOWN:-0}" = "1" ]]; then
DO_CHOWN=0
fi
CLICKHOUSE_UID="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
CLICKHOUSE_GID="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
# CLICKHOUSE_UID and CLICKHOUSE_GID are kept for backward compatibility, but deprecated
# One must use either "docker run --user" or CLICKHOUSE_RUN_AS_ROOT=1 to run the process as the desired user
# FIXME: Remove ALL CLICKHOUSE_UID CLICKHOUSE_GID before 25.3
if [[ "${CLICKHOUSE_UID:-}" || "${CLICKHOUSE_GID:-}" ]]; then
echo 'WARNING: Support for CLICKHOUSE_UID/CLICKHOUSE_GID will be removed in a couple of releases.' >&2
echo 'WARNING: Either use a proper "docker run --user=xxx:xxxx" argument instead of CLICKHOUSE_UID/CLICKHOUSE_GID' >&2
echo 'WARNING: or set "CLICKHOUSE_RUN_AS_ROOT=1" ENV to run the clickhouse-server as root:root' >&2
fi
# support --user
if [ "$(id -u)" = "0" ]; then
USER=$CLICKHOUSE_UID
GROUP=$CLICKHOUSE_GID
# support `docker run --user=xxx:xxxx`
if [[ "$(id -u)" = "0" ]]; then
if [[ "$CLICKHOUSE_RUN_AS_ROOT" = 1 ]]; then
USER=0
GROUP=0
else
USER="${CLICKHOUSE_UID:-"$(id -u clickhouse)"}"
GROUP="${CLICKHOUSE_GID:-"$(id -g clickhouse)"}"
fi
else
USER="$(id -u)"
GROUP="$(id -g)"
@ -55,14 +66,14 @@ function create_directory_and_do_chown() {
[ -z "$dir" ] && return
# ensure directories exist
if [ "$DO_CHOWN" = "1" ]; then
mkdir="mkdir"
mkdir=( mkdir )
else
# if DO_CHOWN=0 it means that the system does not map root user to "admin" permissions
# it mainly happens on NFS mounts where root==nobody for security reasons
# thus mkdir MUST run with user id/gid and not from nobody that has zero permissions
mkdir="/usr/bin/clickhouse su "${USER}:${GROUP}" mkdir"
mkdir=( clickhouse su "${USER}:${GROUP}" mkdir )
fi
if ! $mkdir -p "$dir"; then
if ! "${mkdir[@]}" -p "$dir"; then
echo "Couldn't create necessary directory: $dir"
exit 1
fi
@ -143,7 +154,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
fi
# Listen only on localhost until the initialization is done
/usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" -- --listen_host=127.0.0.1 &
pid="$!"
# check if clickhouse is ready to accept connections
@ -151,7 +162,7 @@ if [ -n "${RUN_INITDB_SCRIPTS}" ]; then
tries=${CLICKHOUSE_INIT_TIMEOUT:-1000}
while ! wget --spider --no-check-certificate -T 1 -q "$URL" 2>/dev/null; do
if [ "$tries" -le "0" ]; then
echo >&2 'ClickHouse init process failed.'
echo >&2 'ClickHouse init process timed out.'
exit 1
fi
tries=$(( tries-1 ))
@ -203,18 +214,8 @@ if [[ $# -lt 1 ]] || [[ "$1" == "--"* ]]; then
CLICKHOUSE_WATCHDOG_ENABLE=${CLICKHOUSE_WATCHDOG_ENABLE:-0}
export CLICKHOUSE_WATCHDOG_ENABLE
# An option for easy restarting and replacing clickhouse-server in a container, especially in Kubernetes.
# For example, you can replace the clickhouse-server binary to another and restart it while keeping the container running.
if [[ "${CLICKHOUSE_DOCKER_RESTART_ON_EXIT:-0}" -eq "1" ]]; then
while true; do
# This runs the server as a child process of the shell script:
/usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@" ||:
echo >&2 'ClickHouse Server exited, and the environment variable CLICKHOUSE_DOCKER_RESTART_ON_EXIT is set to 1. Restarting the server.'
done
else
# This replaces the shell script with the server:
exec /usr/bin/clickhouse su "${USER}:${GROUP}" /usr/bin/clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi
# This replaces the shell script with the server:
exec clickhouse su "${USER}:${GROUP}" clickhouse-server --config-file="$CLICKHOUSE_CONFIG" "$@"
fi
# Otherwise, we assume the user wants to run their own process, for example a `bash` shell to explore this image

View File

@ -1,16 +0,0 @@
# Since right now we can't set volumes for docker during build, we split building the container into stages:
# 1. build base container
# 2. run base container with mounted volumes
# 3. commit container as image
FROM ubuntu:20.04 as clickhouse-test-runner-base
# A volume where directory with clickhouse packages to be mounted,
# for later installing.
VOLUME /packages
CMD apt-get update ;\
DEBIAN_FRONTEND=noninteractive \
apt install -y /packages/clickhouse-common-static_*.deb \
/packages/clickhouse-client_*.deb \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/* /var/cache/debconf /tmp/*

View File

@ -0,0 +1,61 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.10.2.80-stable (96b80057159) FIXME as compared to v24.10.1.2812-stable (9cd0a3738d5)
#### Backward Incompatible Change
* Backported in [#71363](https://github.com/ClickHouse/ClickHouse/issues/71363): Fix possible error `No such file or directory` due to unescaped special symbols in files for JSON subcolumns. [#71182](https://github.com/ClickHouse/ClickHouse/pull/71182) ([Pavel Kruglov](https://github.com/Avogar)).
#### Performance Improvement
* Backported in [#71852](https://github.com/ClickHouse/ClickHouse/issues/71852): Improve the performance and accuracy of system.query_metric_log collection interval by reducing the critical region. [#71473](https://github.com/ClickHouse/ClickHouse/pull/71473) ([Pablo Marcos](https://github.com/pamarcos)).
#### Improvement
* Backported in [#71495](https://github.com/ClickHouse/ClickHouse/issues/71495): Enable `parallel_replicas_local_plan` by default. Building a full-fledged local plan on the query initiator improves parallel replicas performance with less resource consumption, provides opportunities to apply more query optimizations. [#70171](https://github.com/ClickHouse/ClickHouse/pull/70171) ([Igor Nikonov](https://github.com/devcrafter)).
* Backported in [#71985](https://github.com/ClickHouse/ClickHouse/issues/71985): Fixes RIGHT / FULL joins in queries with parallel replicas. Now, RIGHT joins can be executed with parallel replicas (right table reading is distributed). FULL joins can't be parallelized among nodes and are executed locally. [#71162](https://github.com/ClickHouse/ClickHouse/pull/71162) ([Igor Nikonov](https://github.com/devcrafter)).
* Backported in [#71670](https://github.com/ClickHouse/ClickHouse/issues/71670): When a user/group is given as an ID, `clickhouse su` fails. This patch fixes it to accept `UID:GID` as well. [#71626](https://github.com/ClickHouse/ClickHouse/pull/71626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#71940](https://github.com/ClickHouse/ClickHouse/issues/71940): Update `HostResolver` 3 times in a `history` period. [#71863](https://github.com/ClickHouse/ClickHouse/pull/71863) ([Sema Checherinda](https://github.com/CheSema)).
* Backported in [#71922](https://github.com/ClickHouse/ClickHouse/issues/71922): `allow_reorder_prewhere_conditions` is on by default with old compatibility settings. [#71867](https://github.com/ClickHouse/ClickHouse/pull/71867) ([Raúl Marín](https://github.com/Algunenano)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#71588](https://github.com/ClickHouse/ClickHouse/issues/71588): Fix mismatched aggregate function name of quantileExactWeightedInterpolated. The bug was introduced in https://github.com/ClickHouse/ClickHouse/pull/69619. cc @Algunenano. [#71168](https://github.com/ClickHouse/ClickHouse/pull/71168) ([李扬](https://github.com/taiyang-li)).
* Backported in [#71357](https://github.com/ClickHouse/ClickHouse/issues/71357): Fix bad_weak_ptr exception with Dynamic in functions comparison. [#71183](https://github.com/ClickHouse/ClickHouse/pull/71183) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#71467](https://github.com/ClickHouse/ClickHouse/issues/71467): Fix bug of memory usage increase if enable_filesystem_cache=1, but disk in storage configuration did not have any cache configuration. [#71261](https://github.com/ClickHouse/ClickHouse/pull/71261) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#71355](https://github.com/ClickHouse/ClickHouse/issues/71355): Fix possible "Cannot read all data" errors during deserialization of LowCardinality dictionary from Dynamic column. [#71299](https://github.com/ClickHouse/ClickHouse/pull/71299) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#71324](https://github.com/ClickHouse/ClickHouse/issues/71324): Fix incomplete cleanup of parallel output format in the client. [#71304](https://github.com/ClickHouse/ClickHouse/pull/71304) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71466](https://github.com/ClickHouse/ClickHouse/issues/71466): Added missing unescaping in named collections. Without fix clickhouse-server can't start. [#71308](https://github.com/ClickHouse/ClickHouse/pull/71308) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Backported in [#71393](https://github.com/ClickHouse/ClickHouse/issues/71393): Fix inconsistent AST formatting when granting wrong wildcard grants [#71309](https://github.com/ClickHouse/ClickHouse/issues/71309). [#71332](https://github.com/ClickHouse/ClickHouse/pull/71332) ([pufit](https://github.com/pufit)).
* Backported in [#71379](https://github.com/ClickHouse/ClickHouse/issues/71379): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)).
* Backported in [#71751](https://github.com/ClickHouse/ClickHouse/issues/71751): Check suspicious and experimental types in JSON type hints. [#71369](https://github.com/ClickHouse/ClickHouse/pull/71369) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#71451](https://github.com/ClickHouse/ClickHouse/issues/71451): Start memory worker thread on non-Linux OS too (fixes [#71051](https://github.com/ClickHouse/ClickHouse/issues/71051)). [#71384](https://github.com/ClickHouse/ClickHouse/pull/71384) ([Alexandre Snarskii](https://github.com/snar)).
* Backported in [#71608](https://github.com/ClickHouse/ClickHouse/issues/71608): Fix error Invalid number of rows in Chunk with Variant column. [#71388](https://github.com/ClickHouse/ClickHouse/pull/71388) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#71493](https://github.com/ClickHouse/ClickHouse/issues/71493): Fix crash in `mongodb` table function when passing wrong arguments (e.g. `NULL`). [#71426](https://github.com/ClickHouse/ClickHouse/pull/71426) ([Vladimir Cherkasov](https://github.com/vdimir)).
* Backported in [#71815](https://github.com/ClickHouse/ClickHouse/issues/71815): Fix crash with optimize_rewrite_array_exists_to_has. [#71432](https://github.com/ClickHouse/ClickHouse/pull/71432) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71521](https://github.com/ClickHouse/ClickHouse/issues/71521): Fix possible error `Argument for function must be constant` (old analyzer) in case when arrayJoin can apparently appear in `WHERE` condition. Regression after https://github.com/ClickHouse/ClickHouse/pull/65414. [#71476](https://github.com/ClickHouse/ClickHouse/pull/71476) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#71555](https://github.com/ClickHouse/ClickHouse/issues/71555): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71618](https://github.com/ClickHouse/ClickHouse/issues/71618): Analyzer fix when query inside materialized view uses IN with CTE. Closes [#65598](https://github.com/ClickHouse/ClickHouse/issues/65598). [#71538](https://github.com/ClickHouse/ClickHouse/pull/71538) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#71570](https://github.com/ClickHouse/ClickHouse/issues/71570): Avoid crash when using a UDF in a constraint. [#71541](https://github.com/ClickHouse/ClickHouse/pull/71541) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71646](https://github.com/ClickHouse/ClickHouse/issues/71646): Return 0 or default char instead of throwing an error in bitShift functions in case of out of bounds. [#71580](https://github.com/ClickHouse/ClickHouse/pull/71580) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#71880](https://github.com/ClickHouse/ClickHouse/issues/71880): Fix LOGICAL_ERROR when doing ALTER with empty tuple. This fixes [#71647](https://github.com/ClickHouse/ClickHouse/issues/71647). [#71679](https://github.com/ClickHouse/ClickHouse/pull/71679) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#71741](https://github.com/ClickHouse/ClickHouse/issues/71741): Don't transform constant set in predicates over partition columns in case of NOT IN operator. [#71695](https://github.com/ClickHouse/ClickHouse/pull/71695) ([Eduard Karacharov](https://github.com/korowa)).
* Backported in [#72012](https://github.com/ClickHouse/ClickHouse/issues/72012): Fix exception for toDayOfWeek on WHERE condition with primary key of DateTime64 type. [#71849](https://github.com/ClickHouse/ClickHouse/pull/71849) ([Yakov Olkhovskiy](https://github.com/yakov-olkhovskiy)).
* Backported in [#71897](https://github.com/ClickHouse/ClickHouse/issues/71897): Fixed filling of defaults after parsing into sparse columns. [#71854](https://github.com/ClickHouse/ClickHouse/pull/71854) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#71955](https://github.com/ClickHouse/ClickHouse/issues/71955): Fix data race between the progress indicator and the progress table in clickhouse-client. This issue is visible when FROM INFILE is used. Intercept keystrokes during INSERT queries to toggle progress table display. [#71901](https://github.com/ClickHouse/ClickHouse/pull/71901) ([Julia Kartseva](https://github.com/jkartseva)).
* Backported in [#72006](https://github.com/ClickHouse/ClickHouse/issues/72006): Fix a crash in clickhouse-client syntax highlighting. Closes [#71864](https://github.com/ClickHouse/ClickHouse/issues/71864). [#71949](https://github.com/ClickHouse/ClickHouse/pull/71949) ([Nikolay Degterinsky](https://github.com/evillique)).
#### Build/Testing/Packaging Improvement
* Backported in [#71692](https://github.com/ClickHouse/ClickHouse/issues/71692): Improve clickhouse-server Dockerfile.ubuntu. Deprecate `CLICKHOUSE_UID/CLICKHOUSE_GID` envs. Remove `CLICKHOUSE_DOCKER_RESTART_ON_EXIT` processing to comply with requirements. Use `clickhouse/clickhouse-server/clickhouse-keeper` consistently instead of the plain name in one place and `/usr/bin/clickhouse*` in another. [#71573](https://github.com/ClickHouse/ClickHouse/pull/71573) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#71387](https://github.com/ClickHouse/ClickHouse/issues/71387): Remove bad test `test_system_replicated_fetches`. [#71071](https://github.com/ClickHouse/ClickHouse/pull/71071) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#71586](https://github.com/ClickHouse/ClickHouse/issues/71586): Fix `WITH TOTALS` in subquery with parallel replicas. [#71224](https://github.com/ClickHouse/ClickHouse/pull/71224) ([Nikita Taranov](https://github.com/nickitat)).
* Backported in [#71437](https://github.com/ClickHouse/ClickHouse/issues/71437): Ignore `No such key` exceptions in some cases. [#71236](https://github.com/ClickHouse/ClickHouse/pull/71236) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#71629](https://github.com/ClickHouse/ClickHouse/issues/71629): Fix compatibility with refreshable materialized views created by old clickhouse servers. [#71556](https://github.com/ClickHouse/ClickHouse/pull/71556) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#71805](https://github.com/ClickHouse/ClickHouse/issues/71805): Fix issues we face on orphaned backport branches and closed release PRs, when fake-master events are sent to the check DB. [#71782](https://github.com/ClickHouse/ClickHouse/pull/71782) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#71832](https://github.com/ClickHouse/ClickHouse/issues/71832): Closes [#71780](https://github.com/ClickHouse/ClickHouse/issues/71780). [#71818](https://github.com/ClickHouse/ClickHouse/pull/71818) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#71840](https://github.com/ClickHouse/ClickHouse/issues/71840): The change has already been applied to https://github.com/docker-library/official-images/pull/17876. Backport it to every branch to have a proper `Dockerfile.ubuntu` there. [#71825](https://github.com/ClickHouse/ClickHouse/pull/71825) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

View File

@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.13.40-lts (7acabd77389) FIXME as compared to v24.3.12.75-lts (7cb5dff8019)
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#63976](https://github.com/ClickHouse/ClickHouse/issues/63976): Fix intersect parts when restart after drop range. [#63202](https://github.com/ClickHouse/ClickHouse/pull/63202) ([Han Fei](https://github.com/hanfei1991)).
* Backported in [#71482](https://github.com/ClickHouse/ClickHouse/issues/71482): Fix `Content-Encoding` not sent in some compressed responses. [#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#70451](https://github.com/ClickHouse/ClickHouse/issues/70451): Fix crash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70619](https://github.com/ClickHouse/ClickHouse/issues/70619): Fix server segfault on creating a materialized view with two selects and an `INTERSECT`, e.g. `CREATE MATERIALIZED VIEW v0 AS (SELECT 1) INTERSECT (SELECT 1);`. [#70264](https://github.com/ClickHouse/ClickHouse/pull/70264) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#70877](https://github.com/ClickHouse/ClickHouse/issues/70877): Fix table creation with `CREATE ... AS table_function()` with database `Replicated` and unavailable table function source on secondary replica. [#70511](https://github.com/ClickHouse/ClickHouse/pull/70511) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#70571](https://github.com/ClickHouse/ClickHouse/issues/70571): Ignore all output on async insert with `wait_for_async_insert=1`. Closes [#62644](https://github.com/ClickHouse/ClickHouse/issues/62644). [#70530](https://github.com/ClickHouse/ClickHouse/pull/70530) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#71146](https://github.com/ClickHouse/ClickHouse/issues/71146): Ignore frozen_metadata.txt while traversing shadow directory from system.remote_data_paths. [#70590](https://github.com/ClickHouse/ClickHouse/pull/70590) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#70682](https://github.com/ClickHouse/ClickHouse/issues/70682): Fix creation of stateful window functions on misaligned memory. [#70631](https://github.com/ClickHouse/ClickHouse/pull/70631) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71113](https://github.com/ClickHouse/ClickHouse/issues/71113): Fix a crash and a leak in AggregateFunctionGroupArraySorted. [#70820](https://github.com/ClickHouse/ClickHouse/pull/70820) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#70990](https://github.com/ClickHouse/ClickHouse/issues/70990): Fix a logical error due to negative zeros in the two-level hash table. This closes [#70973](https://github.com/ClickHouse/ClickHouse/issues/70973). [#70979](https://github.com/ClickHouse/ClickHouse/pull/70979) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#71246](https://github.com/ClickHouse/ClickHouse/issues/71246): Fixed named sessions not being closed and hanging on forever under certain circumstances. [#70998](https://github.com/ClickHouse/ClickHouse/pull/70998) ([Márcio Martins](https://github.com/marcio-absmartly)).
* Backported in [#71371](https://github.com/ClickHouse/ClickHouse/issues/71371): Add try/catch to data parts destructors to avoid terminate. [#71364](https://github.com/ClickHouse/ClickHouse/pull/71364) ([alesapin](https://github.com/alesapin)).
* Backported in [#71594](https://github.com/ClickHouse/ClickHouse/issues/71594): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#71022](https://github.com/ClickHouse/ClickHouse/issues/71022): Fix dropping of file cache in CHECK query in case of enabled transactions. [#69256](https://github.com/ClickHouse/ClickHouse/pull/69256) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#70384](https://github.com/ClickHouse/ClickHouse/issues/70384): CI: Enable Integration Tests for backport PRs. [#70329](https://github.com/ClickHouse/ClickHouse/pull/70329) ([Max Kainov](https://github.com/maxknv)).
* Backported in [#70538](https://github.com/ClickHouse/ClickHouse/issues/70538): Remove slow poll() logs in keeper. [#70508](https://github.com/ClickHouse/ClickHouse/pull/70508) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#70971](https://github.com/ClickHouse/ClickHouse/issues/70971): Limiting logging some lines about configs. [#70879](https://github.com/ClickHouse/ClickHouse/pull/70879) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).

View File

@ -0,0 +1,31 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.14.35-lts (cfa4e62b775) FIXME as compared to v24.3.13.40-lts (7acabd77389)
#### Improvement
* Backported in [#71711](https://github.com/ClickHouse/ClickHouse/issues/71711): CLICKHOUSE_PASSWORD is escaped for XML in clickhouse image's entrypoint. [#69301](https://github.com/ClickHouse/ClickHouse/pull/69301) ([aohoyd](https://github.com/aohoyd)).
* Backported in [#71662](https://github.com/ClickHouse/ClickHouse/issues/71662): When a user/group is given as an ID, `clickhouse su` fails. This patch fixes it to accept `UID:GID` as well. [#71626](https://github.com/ClickHouse/ClickHouse/pull/71626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#65755](https://github.com/ClickHouse/ClickHouse/issues/65755): Fix the `Expression nodes list expected 1 projection names` and `Unknown expression or identifier` errors for queries with aliases to `GLOBAL IN.`. [#64517](https://github.com/ClickHouse/ClickHouse/pull/64517) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#71600](https://github.com/ClickHouse/ClickHouse/issues/71600): Fix error Invalid number of rows in Chunk with Variant column. [#71388](https://github.com/ClickHouse/ClickHouse/pull/71388) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#71842](https://github.com/ClickHouse/ClickHouse/issues/71842): Fix crash with optimize_rewrite_array_exists_to_has. [#71432](https://github.com/ClickHouse/ClickHouse/pull/71432) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71562](https://github.com/ClickHouse/ClickHouse/issues/71562): Avoid crash when using a UDF in a constraint. [#71541](https://github.com/ClickHouse/ClickHouse/pull/71541) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71731](https://github.com/ClickHouse/ClickHouse/issues/71731): Return 0 or default char instead of throwing an error in bitShift functions in case of out of bounds. [#71580](https://github.com/ClickHouse/ClickHouse/pull/71580) ([Pablo Marcos](https://github.com/pamarcos)).
#### Build/Testing/Packaging Improvement
* Backported in [#71697](https://github.com/ClickHouse/ClickHouse/issues/71697): Vendor in rust dependencies. [#62297](https://github.com/ClickHouse/ClickHouse/pull/62297) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71688](https://github.com/ClickHouse/ClickHouse/issues/71688): Improve clickhouse-server Dockerfile.ubuntu. Deprecate `CLICKHOUSE_UID/CLICKHOUSE_GID` envs. Remove `CLICKHOUSE_DOCKER_RESTART_ON_EXIT` processing to comply with requirements. Use `clickhouse/clickhouse-server/clickhouse-keeper` consistently instead of the plain name in one place and `/usr/bin/clickhouse*` in another. [#71573](https://github.com/ClickHouse/ClickHouse/pull/71573) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#71808](https://github.com/ClickHouse/ClickHouse/issues/71808): Fix issues we face on orphaned backport branches and closed release PRs, when fake-master events are sent to the check DB. [#71782](https://github.com/ClickHouse/ClickHouse/pull/71782) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#71834](https://github.com/ClickHouse/ClickHouse/issues/71834): The change has already been applied to https://github.com/docker-library/official-images/pull/17876. Backport it to every branch to have a proper `Dockerfile.ubuntu` there. [#71825](https://github.com/ClickHouse/ClickHouse/pull/71825) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Fix bitShift test after backport. [#71861](https://github.com/ClickHouse/ClickHouse/pull/71861) ([Pablo Marcos](https://github.com/pamarcos)).
* Revert "Merge pull request [#71861](https://github.com/ClickHouse/ClickHouse/issues/71861) from pamarcos/fix-bitshift-test". [#71871](https://github.com/ClickHouse/ClickHouse/pull/71871) ([Pablo Marcos](https://github.com/pamarcos)).

View File

@ -0,0 +1,37 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.8.7.41-lts (e28553d4f2b) FIXME as compared to v24.8.6.70-lts (ddb8c219771)
#### Improvement
* Backported in [#71713](https://github.com/ClickHouse/ClickHouse/issues/71713): CLICKHOUSE_PASSWORD is escaped for XML in clickhouse image's entrypoint. [#69301](https://github.com/ClickHouse/ClickHouse/pull/69301) ([aohoyd](https://github.com/aohoyd)).
* Backported in [#71666](https://github.com/ClickHouse/ClickHouse/issues/71666): When a user/group is given as an ID, `clickhouse su` fails. This patch fixes it to accept `UID:GID` as well. [#71626](https://github.com/ClickHouse/ClickHouse/pull/71626) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#71936](https://github.com/ClickHouse/ClickHouse/issues/71936): Update `HostResolver` 3 times in a `history` period. [#71863](https://github.com/ClickHouse/ClickHouse/pull/71863) ([Sema Checherinda](https://github.com/CheSema)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#71486](https://github.com/ClickHouse/ClickHouse/issues/71486): Fix `Content-Encoding` not sent in some compressed responses. [#64802](https://github.com/ClickHouse/ClickHouse/issues/64802). [#68975](https://github.com/ClickHouse/ClickHouse/pull/68975) ([Konstantin Bogdanov](https://github.com/thevar1able)).
* Backported in [#71462](https://github.com/ClickHouse/ClickHouse/issues/71462): Added missing unescaping in named collections. Without fix clickhouse-server can't start. [#71308](https://github.com/ClickHouse/ClickHouse/pull/71308) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Backported in [#71747](https://github.com/ClickHouse/ClickHouse/issues/71747): Check suspicious and experimental types in JSON type hints. [#71369](https://github.com/ClickHouse/ClickHouse/pull/71369) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#71604](https://github.com/ClickHouse/ClickHouse/issues/71604): Fix error Invalid number of rows in Chunk with Variant column. [#71388](https://github.com/ClickHouse/ClickHouse/pull/71388) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#71826](https://github.com/ClickHouse/ClickHouse/issues/71826): Fix crash with optimize_rewrite_array_exists_to_has. [#71432](https://github.com/ClickHouse/ClickHouse/pull/71432) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71517](https://github.com/ClickHouse/ClickHouse/issues/71517): Fix possible error `Argument for function must be constant` (old analyzer) in case when arrayJoin can apparently appear in `WHERE` condition. Regression after https://github.com/ClickHouse/ClickHouse/pull/65414. [#71476](https://github.com/ClickHouse/ClickHouse/pull/71476) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#71551](https://github.com/ClickHouse/ClickHouse/issues/71551): Prevent crash in SortCursor with 0 columns (old analyzer). [#71494](https://github.com/ClickHouse/ClickHouse/pull/71494) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71614](https://github.com/ClickHouse/ClickHouse/issues/71614): Analyzer fix when query inside materialized view uses IN with CTE. Closes [#65598](https://github.com/ClickHouse/ClickHouse/issues/65598). [#71538](https://github.com/ClickHouse/ClickHouse/pull/71538) ([Maksim Kita](https://github.com/kitaisreal)).
* Backported in [#71566](https://github.com/ClickHouse/ClickHouse/issues/71566): Avoid crash when using a UDF in a constraint. [#71541](https://github.com/ClickHouse/ClickHouse/pull/71541) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#71727](https://github.com/ClickHouse/ClickHouse/issues/71727): Return 0 or default char instead of throwing an error in bitShift functions in case of out of bounds. [#71580](https://github.com/ClickHouse/ClickHouse/pull/71580) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#71876](https://github.com/ClickHouse/ClickHouse/issues/71876): Fix LOGICAL_ERROR when doing ALTER with empty tuple. This fixes [#71647](https://github.com/ClickHouse/ClickHouse/issues/71647). [#71679](https://github.com/ClickHouse/ClickHouse/pull/71679) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#71737](https://github.com/ClickHouse/ClickHouse/issues/71737): Don't transform constant set in predicates over partition columns in case of NOT IN operator. [#71695](https://github.com/ClickHouse/ClickHouse/pull/71695) ([Eduard Karacharov](https://github.com/korowa)).
* Backported in [#72002](https://github.com/ClickHouse/ClickHouse/issues/72002): Fix a crash in clickhouse-client syntax highlighting. Closes [#71864](https://github.com/ClickHouse/ClickHouse/issues/71864). [#71949](https://github.com/ClickHouse/ClickHouse/pull/71949) ([Nikolay Degterinsky](https://github.com/evillique)).
#### Build/Testing/Packaging Improvement
* Backported in [#71690](https://github.com/ClickHouse/ClickHouse/issues/71690): Improve clickhouse-server Dockerfile.ubuntu. Deprecate `CLICKHOUSE_UID/CLICKHOUSE_GID` envs. Remove `CLICKHOUSE_DOCKER_RESTART_ON_EXIT` processing to comply with requirements. Use `clickhouse/clickhouse-server/clickhouse-keeper` consistently instead of the plain name in one place and `/usr/bin/clickhouse*` in another. [#71573](https://github.com/ClickHouse/ClickHouse/pull/71573) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#71801](https://github.com/ClickHouse/ClickHouse/issues/71801): Fix issues we face on orphaned backport branches and closed release PRs, when fake-master events are sent to the check DB. [#71782](https://github.com/ClickHouse/ClickHouse/pull/71782) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#71836](https://github.com/ClickHouse/ClickHouse/issues/71836): The change has already been applied to https://github.com/docker-library/official-images/pull/17876. Backport it to every branch to have a proper `Dockerfile.ubuntu` there. [#71825](https://github.com/ClickHouse/ClickHouse/pull/71825) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).

File diff suppressed because one or more lines are too long

View File

@ -4,9 +4,13 @@ sidebar_position: 50
sidebar_label: EmbeddedRocksDB
---
import CloudNotSupportedBadge from '@theme/badges/CloudNotSupportedBadge';
# EmbeddedRocksDB Engine
This engine allows integrating ClickHouse with [rocksdb](http://rocksdb.org/).
<CloudNotSupportedBadge />
This engine allows integrating ClickHouse with [RocksDB](http://rocksdb.org/).
## Creating a Table {#creating-a-table}

View File

@ -122,7 +122,7 @@ Default value: `0`.
### s3queue_polling_min_timeout_ms {#polling_min_timeout_ms}
Minimal timeout before next polling (in milliseconds).
Specifies the minimum time, in milliseconds, that ClickHouse waits before making the next polling attempt.
Possible values:
@ -132,7 +132,7 @@ Default value: `1000`.
### s3queue_polling_max_timeout_ms {#polling_max_timeout_ms}
Maximum timeout before next polling (in milliseconds).
Defines the maximum time, in milliseconds, that ClickHouse waits before initiating the next polling attempt.
Possible values:
@ -142,7 +142,7 @@ Default value: `10000`.
### s3queue_polling_backoff_ms {#polling_backoff_ms}
Polling backoff (in milliseconds).
Determines the additional wait time added to the previous polling interval when no new files are found. The next poll occurs after the sum of the previous interval and this backoff value, or the maximum interval, whichever is lower.
Possible values:
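Taken together, the three polling settings implement a simple capped linear backoff. Below is a minimal sketch of how they might be set on an `S3Queue` table (the bucket URL and columns are hypothetical):

```sql
-- Polling starts at 1 s; every poll that finds no new files adds 500 ms,
-- capped at 10 s: next = min(previous + backoff, max).
CREATE TABLE s3queue_imports (name String, value UInt32)
ENGINE = S3Queue('https://example-bucket.s3.amazonaws.com/data/*.csv', 'CSVWithNames')
SETTINGS mode = 'unordered',
         s3queue_polling_min_timeout_ms = 1000,
         s3queue_polling_max_timeout_ms = 10000,
         s3queue_polling_backoff_ms = 500;
```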

View File

@ -10,6 +10,11 @@ The engine inherits from [MergeTree](../../../engines/table-engines/mergetree-fa
You can use `AggregatingMergeTree` tables for incremental data aggregation, including for aggregated materialized views.
You can see an example of how to use AggregatingMergeTree and aggregate functions in the video below:
<div class='vimeo-container'>
<iframe width="1030" height="579" src="https://www.youtube.com/embed/pryhI4F_zqQ" title="Aggregation States in ClickHouse" frameborder="0" allow="accelerometer; autoplay; clipboard-write; encrypted-media; gyroscope; picture-in-picture; web-share" referrerpolicy="strict-origin-when-cross-origin" allowfullscreen></iframe>
</div>
The engine processes all columns with the following types:
## [AggregateFunction](../../../sql-reference/data-types/aggregatefunction.md)

View File

@ -54,7 +54,7 @@ Parameters:
- `distance_function`: either `L2Distance` (the [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance) - the length of a
line between two points in Euclidean space), or `cosineDistance` (the [cosine
distance](https://en.wikipedia.org/wiki/Cosine_similarity#Cosine_distance)- the angle between two non-zero vectors).
- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing the vector with reduced precision (optional, default: `bf16`)
- `quantization`: either `f64`, `f32`, `f16`, `bf16`, or `i8` for storing vectors with reduced precision (optional, default: `bf16`)
- `hnsw_max_connections_per_layer`: the number of neighbors per HNSW graph node, also known as `M` in the [HNSW
paper](https://doi.org/10.1109/TPAMI.2018.2889473) (optional, default: 32)
- `hnsw_candidate_list_size_for_construction`: the size of the dynamic candidate list when constructing the HNSW graph, also known as
@ -92,8 +92,8 @@ Vector similarity indexes currently support two distance functions:
- `cosineDistance`, also called cosine similarity, is the cosine of the angle between two (non-zero) vectors
([Wikipedia](https://en.wikipedia.org/wiki/Cosine_similarity)).
Vector similarity indexes allows storing the vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16` or `i8`.
If no scalar kind was specified during index creation, `f16` is used as default.
Vector similarity indexes allow storing vectors in reduced precision formats. Supported scalar kinds are `f64`, `f32`, `f16`, `bf16`,
and `i8`. If no scalar kind was specified during index creation, `bf16` is used as the default.
For normalized data, `L2Distance` is usually a better choice, otherwise `cosineDistance` is recommended to compensate for scale. If no
distance function was specified during index creation, `L2Distance` is used as default.

View File

@ -684,8 +684,7 @@ If you perform the `SELECT` query between merges, you may get expired data. To a
**See Also**
- [ttl_only_drop_parts](/docs/en/operations/settings/settings.md/#ttl_only_drop_parts) setting
- [ttl_only_drop_parts](/docs/en/operations/settings/merge-tree-settings#ttl_only_drop_parts) setting
## Disk types

View File

@ -33,6 +33,21 @@ Then, generate the data. Parameter `-s` specifies the scale factor. For example,
./dbgen -s 100
```
Detailed table sizes with scale factor 100:
| Table | size (in rows) | size (compressed in ClickHouse) |
|----------|----------------|---------------------------------|
| nation | 25 | 2 kB |
| region | 5 | 1 kB |
| part | 20.000.000 | 895 MB |
| supplier | 1.000.000 | 75 MB |
| partsupp | 80.000.000 | 4.37 GB |
| customer | 15.000.000 | 1.19 GB |
| orders | 150.000.000 | 6.15 GB |
| lineitem | 600.000.000 | 26.69 GB |
(Compressed sizes in ClickHouse are taken from `system.tables.total_bytes` and are based on the table definitions below.)
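Once the tables are created and loaded (see below), the same numbers can be read back from `system.tables`; a minimal sketch, assuming the tables live in the current database:

```sql
SELECT
    name,
    total_rows,
    formatReadableSize(total_bytes) AS compressed_size
FROM system.tables
WHERE database = currentDatabase()
  AND name IN ('nation', 'region', 'part', 'supplier', 'partsupp', 'customer', 'orders', 'lineitem')
ORDER BY total_bytes DESC;
```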
Now create tables in ClickHouse.
We stick as closely as possible to the rules of the TPC-H specification:
@ -151,10 +166,37 @@ clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO orders FORMAT
clickhouse-client --format_csv_delimiter '|' --query "INSERT INTO lineitem FORMAT CSV" < lineitem.tbl
```
The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`:
:::note
Instead of using tpch-kit and generating the tables yourself, you can alternatively import the data from a public S3 bucket. Make sure
to create empty tables first using the `CREATE` statements above.
```sql
-- Scaling factor 1
INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/nation.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/region.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/part.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/supplier.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/partsupp.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/customer.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/orders.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/1/lineitem.tbl', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
-- Scaling factor 100
INSERT INTO nation SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/nation.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO region SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/region.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO part SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/part.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO supplier SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/supplier.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO partsupp SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/partsupp.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO customer SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/customer.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO orders SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/orders.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
INSERT INTO lineitem SELECT * FROM s3('https://clickhouse-datasets.s3.amazonaws.com/h/100/lineitem.tbl.gz', NOSIGN, CSV) SETTINGS format_csv_delimiter = '|', input_format_defaults_for_omitted_fields = 1, input_format_csv_empty_as_default = 1;
```
:::
## Queries
The queries are generated by `./qgen -s <scaling_factor>`. Example queries for `s = 100`:
**Correctness**
The result of the queries agrees with the official results unless mentioned otherwise. To verify, generate a TPC-H database with scale

View File

@ -16,7 +16,7 @@ You have four options for getting up and running with ClickHouse:
- **[ClickHouse Cloud](https://clickhouse.com/cloud/):** The official ClickHouse as a service, built, maintained, and supported by the creators of ClickHouse
- **[Quick Install](#quick-install):** an easy-to-download binary for testing and developing with ClickHouse
- **[Production Deployments](#available-installation-options):** ClickHouse can run on any Linux, FreeBSD, or macOS with x86-64, modern ARM (ARMv8.2-A up), or PowerPC64LE CPU architecture
- **[Docker Image](https://hub.docker.com/r/clickhouse/clickhouse-server/):** use the official Docker image in Docker Hub
- **[Docker Image](https://hub.docker.com/_/clickhouse):** use the official Docker image in Docker Hub
## ClickHouse Cloud

View File

@ -9,7 +9,7 @@ sidebar_label: Prometheus protocols
## Exposing metrics {#expose}
:::note
ClickHouse Cloud does not currently support connecting to Prometheus. To be notified when this feature is supported, please contact support@clickhouse.com.
If you are using ClickHouse Cloud, you can expose metrics to Prometheus using the [Prometheus Integration](/en/integrations/prometheus).
:::
ClickHouse can expose its own metrics for scraping from Prometheus:

View File

@ -65,6 +65,34 @@ sudo rm -f /etc/yum.repos.d/clickhouse.repo
After that follow the [install guide](../getting-started/install.md#from-rpm-packages)
### You Can't Run Docker Container
You are running a simple `docker run clickhouse/clickhouse-server` and it crashes with a stack trace similar to the following:
```
$ docker run -it clickhouse/clickhouse-server
........
2024.11.06 21:04:48.912036 [ 1 ] {} <Information> SentryWriter: Sending crash reports is disabled
Poco::Exception. Code: 1000, e.code() = 0, System exception: cannot start thread, Stack trace (when copying this message, always include the lines below):
0. Poco::ThreadImpl::startImpl(Poco::SharedPtr<Poco::Runnable, Poco::ReferenceCounter, Poco::ReleasePolicy<Poco::Runnable>>) @ 0x00000000157c7b34
1. Poco::Thread::start(Poco::Runnable&) @ 0x00000000157c8a0e
2. BaseDaemon::initializeTerminationAndSignalProcessing() @ 0x000000000d267a14
3. BaseDaemon::initialize(Poco::Util::Application&) @ 0x000000000d2652cb
4. DB::Server::initialize(Poco::Util::Application&) @ 0x000000000d128b38
5. Poco::Util::Application::run() @ 0x000000001581cfda
6. DB::Server::run() @ 0x000000000d1288f0
7. Poco::Util::ServerApplication::run(int, char**) @ 0x0000000015825e27
8. mainEntryClickHouseServer(int, char**) @ 0x000000000d125b38
9. main @ 0x0000000007ea4eee
10. ? @ 0x00007f67ff946d90
11. ? @ 0x00007f67ff946e40
12. _start @ 0x00000000062e802e
(version 24.10.1.2812 (official build))
```
The reason is an old docker daemon with a version lower than `20.10.10`. The fix is either to upgrade it or to run `docker run [--privileged | --security-opt seccomp=unconfined]`. The latter has security implications.
## Connecting to the Server {#troubleshooting-accepts-no-connections}
Possible issues:

View File

@ -25,9 +25,10 @@ Query caches can generally be viewed as transactionally consistent or inconsiste
slowly enough that the database only needs to compute the report once (represented by the first `SELECT` query). Further queries can be
served directly from the query cache. In this example, a reasonable validity period could be 30 min.
Transactionally inconsistent caching is traditionally provided by client tools or proxy packages interacting with the database. As a result,
the same caching logic and configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side.
This reduces maintenance effort and avoids redundancy.
Transactionally inconsistent caching is traditionally provided by client tools or proxy packages (e.g.
[chproxy](https://www.chproxy.org/configuration/caching/)) interacting with the database. As a result, the same caching logic and
configuration is often duplicated. With ClickHouse's query cache, the caching logic moves to the server side. This reduces maintenance
effort and avoids redundancy.
## Configuration Settings and Usage
@ -138,7 +139,10 @@ is only cached if the query runs longer than 5 seconds. It is also possible to s
cached - for that use setting [query_cache_min_query_runs](settings/settings.md#query-cache-min-query-runs).
Entries in the query cache become stale after a certain time period (time-to-live). By default, this period is 60 seconds but a different
value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl).
value can be specified at session, profile or query level using setting [query_cache_ttl](settings/settings.md#query-cache-ttl). The query
cache evicts entries "lazily", i.e. when an entry becomes stale, it is not immediately removed from the cache. Instead, when a new entry
is to be inserted into the query cache, the database checks whether the cache has enough free space for the new entry. If this is not the
case, the database tries to remove all stale entries. If the cache still has not enough free space, the new entry is not inserted.
Entries in the query cache are compressed by default. This reduces the overall memory consumption at the cost of slower writes into / reads
from the query cache. To disable compression, use setting [query_cache_compress_entries](settings/settings.md#query-cache-compress-entries).
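A minimal sketch of how these settings combine for a single query (the `hits` table is hypothetical; `query_cache_ttl` and `query_cache_compress_entries` are the settings named above):

```sql
-- Cache this result, keep the entry alive for 5 minutes, and store it uncompressed.
SELECT count() FROM hits
SETTINGS use_query_cache = 1,
         query_cache_ttl = 300,
         query_cache_compress_entries = 0;
```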
@ -188,14 +192,9 @@ Also, results of queries with non-deterministic functions are not cached by defa
To force caching of results of queries with non-deterministic functions regardless, use setting
[query_cache_nondeterministic_function_handling](settings/settings.md#query-cache-nondeterministic-function-handling).
Results of queries that involve system tables, e.g. `system.processes` or `information_schema.tables`, are not cached by default. To force
caching of results of queries with system tables regardless, use setting
[query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling).
:::note
Prior to ClickHouse v23.11, setting 'query_cache_store_results_of_queries_with_nondeterministic_functions = 0 / 1' controlled whether
results of queries with non-deterministic results were cached. In newer ClickHouse versions, this setting is obsolete and has no effect.
:::
Results of queries that involve system tables (e.g. [system.processes](system-tables/processes.md)` or
[information_schema.tables](system-tables/information_schema.md)) are not cached by default. To force caching of results of queries with
system tables regardless, use setting [query_cache_system_table_handling](settings/settings.md#query-cache-system-table-handling).
Finally, entries in the query cache are not shared between users due to security reasons. For example, user A must not be able to bypass a
row policy on a table by running the same query as another user B for whom no such policy exists. However, if necessary, cache entries can

View File

@ -131,16 +131,6 @@ Type: UInt64
Default: 8
## background_pool_size
Sets the number of threads performing background merges and mutations for tables with MergeTree engines. You can only increase the number of threads at runtime. To lower the number of threads you have to restart the server. By adjusting this setting, you manage CPU and disk load. Smaller pool size utilizes less CPU and disk resources, but background processes advance slower which might eventually impact query performance.
Before changing it, please also take a look at related MergeTree settings, such as `number_of_free_entries_in_pool_to_lower_max_size_of_merge` and `number_of_free_entries_in_pool_to_execute_mutation`.
Type: UInt64
Default: 16
## background_schedule_pool_size
The maximum number of threads that will be used for constantly executing some lightweight periodic operations for replicated tables, Kafka streaming, and DNS cache updates.
@ -607,6 +597,30 @@ If number of tables is greater than this value, server will throw an exception.
<max_table_num_to_throw>400</max_table_num_to_throw>
```
## max\_replicated\_table\_num\_to\_throw {#max-replicated-table-num-to-throw}
If the number of replicated tables is greater than this value, the server will throw an exception. 0 means no limitation. Only tables in the Atomic/Ordinary/Replicated/Lazy database engines are counted.
**Example**
```xml
<max_replicated_table_num_to_throw>400</max_replicated_table_num_to_throw>
```
## max\_dictionary\_num\_to\_throw {#max-dictionary-num-to-throw}
If the number of dictionaries is greater than this value, the server will throw an exception. 0 means no limitation. Only tables in the Atomic/Ordinary/Replicated/Lazy database engines are counted.
**Example**
```xml
<max_dictionary_num_to_throw>400</max_dictionary_num_to_throw>
```
## max\_view\_num\_to\_throw {#max-view-num-to-throw}
If the number of views is greater than this value, the server will throw an exception. 0 means no limitation. Only tables in the Atomic/Ordinary/Replicated/Lazy database engines are counted.
**Example**
```xml
<max_view_num_to_throw>400</max_view_num_to_throw>
```
## max\_database\_num\_to\_throw {#max-database-num-to-throw}
If the number of databases is greater than this value, the server will throw an exception. 0 means no limitation.
Default value: 0
@ -1629,6 +1643,7 @@ You can specify the log format that will be outputted in the console log. Curren
```json
{
"date_time_utc": "2024-11-06T09:06:09Z",
"date_time": "1650918987.180175",
"thread_name": "#1",
"thread_id": "254545",

View File

@ -78,6 +78,16 @@ If `min_merge_bytes_to_use_direct_io = 0`, then direct I/O is disabled.
Default value: `10 * 1024 * 1024 * 1024` bytes.
## ttl_only_drop_parts
Controls whether data parts are fully dropped in MergeTree tables when all rows in that part have expired according to their `TTL` settings.
When `ttl_only_drop_parts` is disabled (by default), only the rows that have expired based on their TTL settings are removed.
When `ttl_only_drop_parts` is enabled, the entire part is dropped if all rows in that part have expired according to their `TTL` settings.
Default value: 0.
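For illustration, a minimal sketch of a table where expired data is removed by dropping whole parts (table and column names are hypothetical):

```sql
CREATE TABLE events
(
    ts DateTime,
    message String
)
ENGINE = MergeTree
ORDER BY ts
TTL ts + INTERVAL 30 DAY
SETTINGS ttl_only_drop_parts = 1;
```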
## merge_with_ttl_timeout
Minimum delay in seconds before repeating a merge with delete TTL.
@ -1095,3 +1105,13 @@ Possible values:
Default value: 0.0
Note that if both `min_free_disk_ratio_to_perform_insert` and `min_free_disk_bytes_to_perform_insert` are specified, ClickHouse uses whichever value requires the larger amount of free disk space to perform inserts.
## cache_populated_by_fetch
A Cloud-only setting.
When `cache_populated_by_fetch` is disabled (the default setting), new data parts are loaded into the cache only when a query is run that requires those parts.
If enabled, `cache_populated_by_fetch` will instead cause all nodes to load new data parts from storage into their cache without requiring a query to trigger such an action.
Default value: 0.

View File

@ -211,7 +211,7 @@ Number of threads in the server of the replicas communication protocol (without
The difference between the time the thread for calculating the asynchronous metrics was scheduled to wake up and the time it in fact woke up. A proxy indicator of overall system latency and responsiveness.
### LoadAverage_*N*
### LoadAverage*N*
The whole system load, averaged with exponential smoothing over 1 minute. The load represents the number of threads across all the processes (the scheduling entities of the OS kernel), that are currently running by CPU or waiting for IO, or ready to run but not being scheduled at this point of time. This number includes all the processes, not only clickhouse-server. The number can be greater than the number of CPU cores, if the system is overloaded, and many processes are ready to run but waiting for CPU or IO.

View File

@ -19,7 +19,7 @@ Columns:
- `column` ([Nullable](../../sql-reference/data-types/nullable.md)([String](../../sql-reference/data-types/string.md))) — Name of a column to which access is granted.
- `is_partial_revoke` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Logical value. It shows whether some privileges have been revoked. Possible values:
- `0` — The row describes a partial revoke.
- `1` — The row describes a grant.
- `0` — The row describes a grant.
- `1` — The row describes a partial revoke.
- `grant_option` ([UInt8](../../sql-reference/data-types/int-uint.md#uint-ranges)) — Permission is granted `WITH GRANT OPTION`, see [GRANT](../../sql-reference/statements/grant.md#granting-privilege-syntax).
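With the corrected column semantics above, here is a minimal sketch of inspecting grants and partial revokes side by side (the user name is hypothetical):

```sql
SELECT access_type, database, table, is_partial_revoke, grant_option
FROM system.grants
WHERE user_name = 'default'
ORDER BY access_type;
```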

View File

@ -75,7 +75,7 @@ FROM t_null_big
└────────────────────┴─────────────────────┘
```
Also you can use [Tuple](/docs/en/sql-reference/data-types/tuple.md) to work around NULL skipping behavior. The a `Tuple` that contains only a `NULL` value is not `NULL`, so the aggregate functions won't skip that row because of that `NULL` value.
Also you can use [Tuple](/docs/en/sql-reference/data-types/tuple.md) to work around NULL skipping behavior. A `Tuple` that contains only a `NULL` value is not `NULL`, so the aggregate functions won't skip that row because of that `NULL` value.
```sql
SELECT
@ -110,7 +110,7 @@ GROUP BY v
└──────┴─────────┴──────────┘
```
And here is an example of of first_value with `RESPECT NULLS` where we can see that NULL inputs are respected and it will return the first value read, whether it's NULL or not:
And here is an example of first_value with `RESPECT NULLS` where we can see that NULL inputs are respected and it will return the first value read, whether it's NULL or not:
```sql
SELECT

View File

@ -5,7 +5,15 @@ sidebar_position: 102
# any
Selects the first encountered value of a column, ignoring any `NULL` values.
Selects the first encountered value of a column.
:::warning
As a query can be executed in arbitrary order, the result of this function is non-deterministic.
If you need an arbitrary but deterministic result, use functions [`min`](../reference/min.md) or [`max`](../reference/max.md).
:::
By default, the function never returns NULL, i.e. it ignores NULL values in the input column.
However, if the function is used with the `RESPECT NULLS` modifier, it returns the first value read, whether it is NULL or not.
**Syntax**
@ -13,46 +21,51 @@ Selects the first encountered value of a column, ignoring any `NULL` values.
any(column) [RESPECT NULLS]
```
Aliases: `any_value`, [`first_value`](../reference/first_value.md).
Aliases for `any(column)` (without `RESPECT NULLS`):
- `any_value`
- [`first_value`](../reference/first_value.md)
Aliases for `any(column) RESPECT NULLS`:
- `anyRespectNulls`, `any_respect_nulls`
- `firstValueRespectNulls`, `first_value_respect_nulls`
- `anyValueRespectNulls`, `any_value_respect_nulls`
**Parameters**
- `column`: The column name.
- `column`: The column name.
**Returned value**
:::note
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the first value passed, regardless of whether it is `NULL` or not.
:::
The first value encountered.
:::note
The return type of the function is the same as the input, except for LowCardinality which is discarded. This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column). You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) ) to modify this behaviour.
:::
:::warning
The query can be executed in any order and even in a different order each time, so the result of this function is indeterminate.
To get a determinate result, you can use the [`min`](../reference/min.md) or [`max`](../reference/max.md) function instead of `any`.
The return type of the function is the same as the input, except for LowCardinality which is discarded.
This means that given no rows as input it will return the default value of that type (0 for integers, or Null for a Nullable() column).
You might use the `-OrNull` [combinator](../../../sql-reference/aggregate-functions/combinators.md) to modify this behaviour.
:::
**Implementation details**
In some cases, you can rely on the order of execution. This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.
In some cases, you can rely on the order of execution.
This applies to cases when `SELECT` comes from a subquery that uses `ORDER BY`.
When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions. In other words, each column selected from the table must be used either in keys or inside aggregate functions. To get behavior like in MySQL, you can put the other columns in the `any` aggregate function.
When a `SELECT` query has the `GROUP BY` clause or at least one aggregate function, ClickHouse (in contrast to MySQL) requires that all expressions in the `SELECT`, `HAVING`, and `ORDER BY` clauses be calculated from keys or from aggregate functions.
In other words, each column selected from the table must be used either in keys or inside aggregate functions.
To get behavior like in MySQL, you can put the other columns in the `any` aggregate function.
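As a short illustration of the MySQL-style workaround just described (the `visits` table and its columns are hypothetical):

```sql
-- user_name is neither a GROUP BY key nor aggregated, so it is wrapped in any()
SELECT
    user_id,
    any(user_name) AS some_user_name,
    count() AS visit_count
FROM visits
GROUP BY user_id;
```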
**Example**
Query:
```sql
CREATE TABLE any_nulls (city Nullable(String)) ENGINE=Log;
CREATE TABLE tab (city Nullable(String)) ENGINE=Memory;
INSERT INTO any_nulls (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
INSERT INTO tab (city) VALUES (NULL), ('Amsterdam'), ('New York'), ('Tokyo'), ('Valencia'), (NULL);
SELECT any(city) FROM any_nulls;
SELECT any(city), anyRespectNulls(city) FROM tab;
```
```response
┌─any(city)─┐
│ Amsterdam │
└───────────┘
┌─any(city)─┬─anyRespectNulls(city)─
│ Amsterdam │ ᴺᵁᴸᴸ │
└───────────┴───────────────────────
```

View File

@ -5,7 +5,15 @@ sidebar_position: 105
# anyLast
Selects the last value encountered, ignoring any `NULL` values by default. The result is just as indeterminate as for the [any](../../../sql-reference/aggregate-functions/reference/any.md) function.
Selects the last encountered value of a column.
:::warning
As a query can be executed in arbitrary order, the result of this function is non-deterministic.
If you need an arbitrary but deterministic result, use functions [`min`](../reference/min.md) or [`max`](../reference/max.md).
:::
By default, the function never returns NULL, i.e. it ignores NULL values in the input column.
However, if the function is used with the `RESPECT NULLS` modifier, it returns the last value read, whether it is NULL or not.
**Syntax**
@ -13,12 +21,15 @@ Selects the last value encountered, ignoring any `NULL` values by default. The r
anyLast(column) [RESPECT NULLS]
```
**Parameters**
- `column`: The column name.
Aliases for `anyLast(column)` (without `RESPECT NULLS`):
- [`last_value`](../reference/last_value.md)
:::note
Supports the `RESPECT NULLS` modifier after the function name. Using this modifier will ensure the function selects the last value passed, regardless of whether it is `NULL` or not.
:::
Aliases for `anyLast(column) RESPECT NULLS`
- `anyLastRespectNulls`, `anyLast_respect_nulls`
- `lastValueRespectNulls`, `last_value_respect_nulls`
**Parameters**
- `column`: The column name.
**Returned value**
@ -29,15 +40,15 @@ Supports the `RESPECT NULLS` modifier after the function name. Using this modifi
Query:
```sql
CREATE TABLE any_last_nulls (city Nullable(String)) ENGINE=Log;
CREATE TABLE tab (city Nullable(String)) ENGINE=Memory;
INSERT INTO any_last_nulls (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
INSERT INTO tab (city) VALUES ('Amsterdam'),(NULL),('New York'),('Tokyo'),('Valencia'),(NULL);
SELECT anyLast(city) FROM any_last_nulls;
SELECT anyLast(city), anyLastRespectNulls(city) FROM tab;
```
```response
┌─anyLast(city)─┐
│ Valencia │
└───────────────┘
┌─anyLast(city)─┬─anyLastRespectNulls(city)─┐
│ Valencia      │ ᴺᵁᴸᴸ                      │
└───────────────┴───────────────────────────┘
```
View File
@ -7,119 +7,4 @@ toc_hidden: true
# List of Aggregate Functions
Standard aggregate functions:
- [count](../reference/count.md)
- [min](../reference/min.md)
- [max](../reference/max.md)
- [sum](../reference/sum.md)
- [avg](../reference/avg.md)
- [any](../reference/any.md)
- [stddevPop](../reference/stddevpop.md)
- [stddevPopStable](../reference/stddevpopstable.md)
- [stddevSamp](../reference/stddevsamp.md)
- [stddevSampStable](../reference/stddevsampstable.md)
- [varPop](../reference/varpop.md)
- [varSamp](../reference/varsamp.md)
- [corr](../reference/corr.md)
- [corrStable](../reference/corrstable.md)
- [corrMatrix](../reference/corrmatrix.md)
- [covarPop](../reference/covarpop.md)
- [covarPopStable](../reference/covarpopstable.md)
- [covarPopMatrix](../reference/covarpopmatrix.md)
- [covarSamp](../reference/covarsamp.md)
- [covarSampStable](../reference/covarsampstable.md)
- [covarSampMatrix](../reference/covarsampmatrix.md)
- [entropy](../reference/entropy.md)
- [exponentialMovingAverage](../reference/exponentialmovingaverage.md)
- [intervalLengthSum](../reference/intervalLengthSum.md)
- [kolmogorovSmirnovTest](../reference/kolmogorovsmirnovtest.md)
- [mannwhitneyutest](../reference/mannwhitneyutest.md)
- [median](../reference/median.md)
- [rankCorr](../reference/rankCorr.md)
- [sumKahan](../reference/sumkahan.md)
- [studentTTest](../reference/studentttest.md)
- [welchTTest](../reference/welchttest.md)
ClickHouse-specific aggregate functions:
- [aggThrow](../reference/aggthrow.md)
- [analysisOfVariance](../reference/analysis_of_variance.md)
- [any](../reference/any.md)
- [anyHeavy](../reference/anyheavy.md)
- [anyLast](../reference/anylast.md)
- [boundingRatio](../reference/boundrat.md)
- [first_value](../reference/first_value.md)
- [last_value](../reference/last_value.md)
- [argMin](../reference/argmin.md)
- [argMax](../reference/argmax.md)
- [avgWeighted](../reference/avgweighted.md)
- [topK](../reference/topk.md)
- [topKWeighted](../reference/topkweighted.md)
- [deltaSum](../reference/deltasum.md)
- [deltaSumTimestamp](../reference/deltasumtimestamp.md)
- [flameGraph](../reference/flame_graph.md)
- [groupArray](../reference/grouparray.md)
- [groupArrayLast](../reference/grouparraylast.md)
- [groupUniqArray](../reference/groupuniqarray.md)
- [groupArrayInsertAt](../reference/grouparrayinsertat.md)
- [groupArrayMovingAvg](../reference/grouparraymovingavg.md)
- [groupArrayMovingSum](../reference/grouparraymovingsum.md)
- [groupArraySample](../reference/grouparraysample.md)
- [groupArraySorted](../reference/grouparraysorted.md)
- [groupArrayIntersect](../reference/grouparrayintersect.md)
- [groupBitAnd](../reference/groupbitand.md)
- [groupBitOr](../reference/groupbitor.md)
- [groupBitXor](../reference/groupbitxor.md)
- [groupBitmap](../reference/groupbitmap.md)
- [groupBitmapAnd](../reference/groupbitmapand.md)
- [groupBitmapOr](../reference/groupbitmapor.md)
- [groupBitmapXor](../reference/groupbitmapxor.md)
- [sumWithOverflow](../reference/sumwithoverflow.md)
- [sumMap](../reference/summap.md)
- [sumMapWithOverflow](../reference/summapwithoverflow.md)
- [sumMapFiltered](../parametric-functions.md/#summapfiltered)
- [sumMapFilteredWithOverflow](../parametric-functions.md/#summapfilteredwithoverflow)
- [minMap](../reference/minmap.md)
- [maxMap](../reference/maxmap.md)
- [skewSamp](../reference/skewsamp.md)
- [skewPop](../reference/skewpop.md)
- [kurtSamp](../reference/kurtsamp.md)
- [kurtPop](../reference/kurtpop.md)
- [uniq](../reference/uniq.md)
- [uniqExact](../reference/uniqexact.md)
- [uniqCombined](../reference/uniqcombined.md)
- [uniqCombined64](../reference/uniqcombined64.md)
- [uniqHLL12](../reference/uniqhll12.md)
- [uniqTheta](../reference/uniqthetasketch.md)
- [quantile](../reference/quantile.md)
- [quantiles](../reference/quantiles.md)
- [quantileExact](../reference/quantileexact.md)
- [quantileExactLow](../reference/quantileexact.md#quantileexactlow)
- [quantileExactHigh](../reference/quantileexact.md#quantileexacthigh)
- [quantileExactWeighted](../reference/quantileexactweighted.md)
- [quantileTiming](../reference/quantiletiming.md)
- [quantileTimingWeighted](../reference/quantiletimingweighted.md)
- [quantileDeterministic](../reference/quantiledeterministic.md)
- [quantileTDigest](../reference/quantiletdigest.md)
- [quantileTDigestWeighted](../reference/quantiletdigestweighted.md)
- [quantileBFloat16](../reference/quantilebfloat16.md#quantilebfloat16)
- [quantileBFloat16Weighted](../reference/quantilebfloat16.md#quantilebfloat16weighted)
- [quantileDD](../reference/quantileddsketch.md#quantileddsketch)
- [simpleLinearRegression](../reference/simplelinearregression.md)
- [singleValueOrNull](../reference/singlevalueornull.md)
- [stochasticLinearRegression](../reference/stochasticlinearregression.md)
- [stochasticLogisticRegression](../reference/stochasticlogisticregression.md)
- [categoricalInformationValue](../reference/categoricalinformationvalue.md)
- [contingency](../reference/contingency.md)
- [cramersV](../reference/cramersv.md)
- [cramersVBiasCorrected](../reference/cramersvbiascorrected.md)
- [theilsU](../reference/theilsu.md)
- [maxIntersections](../reference/maxintersections.md)
- [maxIntersectionsPosition](../reference/maxintersectionsposition.md)
- [meanZTest](../reference/meanztest.md)
- [quantileGK](../reference/quantileGK.md)
- [quantileInterpolatedWeighted](../reference/quantileinterpolatedweighted.md)
- [sparkBar](../reference/sparkbar.md)
- [sumCount](../reference/sumcount.md)
- [largestTriangleThreeBuckets](../reference/largestTriangleThreeBuckets.md)
ClickHouse supports all standard SQL aggregate functions ([sum](../reference/sum.md), [avg](../reference/avg.md), [min](../reference/min.md), [max](../reference/max.md), [count](../reference/count.md)), as well as a wide range of other aggregate functions.
View File
@ -6,7 +6,9 @@ sidebar_label: AggregateFunction
# AggregateFunction
Aggregate functions can have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md). The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix. To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.
Aggregate functions have an implementation-defined intermediate state that can be serialized to an `AggregateFunction(...)` data type and stored in a table, usually, by means of [a materialized view](../../sql-reference/statements/create/view.md).
The common way to produce an aggregate function state is by calling the aggregate function with the `-State` suffix.
To get the final result of aggregation in the future, you must use the same aggregate function with the `-Merge` suffix.
`AggregateFunction(name, types_of_arguments...)` — parametric data type.
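As a minimal sketch of the `-State`/`-Merge` round trip (the table and column names here are illustrative):
```sql
-- Store the intermediate uniq state in an AggregateFunction column ...
CREATE TABLE visit_states
(
    day Date,
    visitors AggregateFunction(uniq, UInt64)
)
ENGINE = AggregatingMergeTree ORDER BY day;

INSERT INTO visit_states
SELECT today() AS day, uniqState(number)
FROM numbers(1000)
GROUP BY day;

-- ... and finalize it later with the same function plus the -Merge suffix.
SELECT day, uniqMerge(visitors) AS unique_visitors
FROM visit_states
GROUP BY day;
```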
View File
@ -512,6 +512,8 @@ The result of operator `<` for values `d1` with underlying type `T1` and `d2` wi
- If `T1 = T2 = T`, the result will be `d1.T < d2.T` (underlying values will be compared).
- If `T1 != T2`, the result will be `T1 < T2` (type names will be compared).
By default, the `Dynamic` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, consider its special comparison rule and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.
Examples:
```sql
CREATE TABLE test (d Dynamic) ENGINE=Memory;
@ -535,7 +537,7 @@ SELECT d, dynamicType(d) FROM test;
```
```sql
SELECT d, dynamicType(d) FROM test ORDER BY d;
SELECT d, dynamicType(d) FROM test ORDER BY d SETTINGS allow_suspicious_types_in_order_by=1;
```
```sql
@ -557,7 +559,7 @@ Example:
```sql
CREATE TABLE test (d Dynamic) ENGINE=Memory;
INSERT INTO test VALUES (1::UInt32), (1::Int64), (100::UInt32), (100::Int64);
SELECT d, dynamicType(d) FROM test ORDER by d;
SELECT d, dynamicType(d) FROM test ORDER BY d SETTINGS allow_suspicious_types_in_order_by=1;
```
```text
@ -570,7 +572,7 @@ SELECT d, dynamicType(d) FROM test ORDER by d;
```
```sql
SELECT d, dynamicType(d) FROM test GROUP by d;
SELECT d, dynamicType(d) FROM test GROUP BY d SETTINGS allow_suspicious_types_in_group_by=1;
```
```text
@ -582,7 +584,7 @@ SELECT d, dynamicType(d) FROM test GROUP by d;
└─────┴────────────────┘
```
**Note**: the described comparison rule is not applied during execution of comparison functions like `<`/`>`/`=` and others because of [special work](#using-dynamic-type-in-functions) of functions with `Dynamic` type
**Note:** the described comparison rule is not applied during execution of comparison functions like `<`/`>`/`=` and others because of [special work](#using-dynamic-type-in-functions) of functions with `Dynamic` type
## Reaching the limit in number of different data types stored inside Dynamic
View File
@ -1,10 +1,10 @@
---
slug: /en/sql-reference/data-types/float
sidebar_position: 4
sidebar_label: Float32, Float64
sidebar_label: Float32, Float64, BFloat16
---
# Float32, Float64
# Float32, Float64, BFloat16
:::note
If you need accurate calculations, in particular if you work with financial or business data requiring a high precision, you should consider using [Decimal](../data-types/decimal.md) instead.
@ -117,3 +117,11 @@ SELECT 0 / 0
```
See the rules for `NaN` sorting in the section [ORDER BY clause](../../sql-reference/statements/select/order-by.md).
## BFloat16
`BFloat16` is a 16-bit floating-point data type with an 8-bit exponent, a sign bit, and a 7-bit mantissa.
It is useful for machine learning and AI applications.
ClickHouse supports conversions between `Float32` and `BFloat16`. Most other operations are not supported.
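A minimal sketch of such a conversion (assuming a recent server; depending on the version, the type may still be gated behind an experimental setting):
```sql
-- The setting name below is an assumption; it may not be needed on all versions.
SET allow_experimental_bfloat16_type = 1;

-- Round-tripping through BFloat16 drops low mantissa bits (7-bit mantissa).
SELECT toFloat32(3.1415927) AS f32, CAST(f32 AS BFloat16) AS bf16;
```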
View File
@ -6,29 +6,8 @@ sidebar_position: 1
# Data Types in ClickHouse
ClickHouse can store various kinds of data in table cells. This section describes the supported data types and special considerations for using and/or implementing them if any.
This section describes the data types supported by ClickHouse, for example [integers](int-uint.md), [floats](float.md) and [strings](string.md).
:::note
You can check whether a data type name is case-sensitive in the [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) table.
:::
ClickHouse data types include:
- **Integer types**: [signed and unsigned integers](./int-uint.md) (`UInt8`, `UInt16`, `UInt32`, `UInt64`, `UInt128`, `UInt256`, `Int8`, `Int16`, `Int32`, `Int64`, `Int128`, `Int256`)
- **Floating-point numbers**: [floats](./float.md)(`Float32` and `Float64`) and [`Decimal` values](./decimal.md)
- **Boolean**: ClickHouse has a [`Boolean` type](./boolean.md)
- **Strings**: [`String`](./string.md) and [`FixedString`](./fixedstring.md)
- **Dates**: use [`Date`](./date.md) and [`Date32`](./date32.md) for days, and [`DateTime`](./datetime.md) and [`DateTime64`](./datetime64.md) for instances in time
- **Object**: the [`Object`](./json.md) stores a JSON document in a single column (deprecated)
- **JSON**: the [`JSON` object](./newjson.md) stores a JSON document in a single column
- **UUID**: a performant option for storing [`UUID` values](./uuid.md)
- **Low cardinality types**: use an [`Enum`](./enum.md) when you have a handful of unique values, or use [`LowCardinality`](./lowcardinality.md) when you have up to 10,000 unique values of a column
- **Arrays**: any column can be defined as an [`Array` of values](./array.md)
- **Maps**: use [`Map`](./map.md) for storing key/value pairs
- **Aggregation function types**: use [`SimpleAggregateFunction`](./simpleaggregatefunction.md) and [`AggregateFunction`](./aggregatefunction.md) for storing the intermediate status of aggregate function results
- **Nested data structures**: A [`Nested` data structure](./nested-data-structures/index.md) is like a table inside a cell
- **Tuples**: A [`Tuple` of elements](./tuple.md), each having an individual type.
- **Nullable**: [`Nullable`](./nullable.md) allows you to store a value as `NULL` when a value is "missing" (instead of the column settings its default value for the data type)
- **IP addresses**: use [`IPv4`](./ipv4.md) and [`IPv6`](./ipv6.md) to efficiently store IP addresses
- **Geo types**: for [geographical data](./geo.md), including `Point`, `Ring`, `Polygon` and `MultiPolygon`
- **Special data types**: including [`Expression`](./special-data-types/expression.md), [`Set`](./special-data-types/set.md), [`Nothing`](./special-data-types/nothing.md) and [`Interval`](./special-data-types/interval.md)
The system table [system.data_type_families](../../operations/system-tables/data_type_families.md#system_tables-data_type_families) provides an overview of all available data types.
It also shows whether a data type is an alias to another data type, and whether its name is case-sensitive (e.g. `bool` vs. `BOOL`).
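For example, a short sketch listing a few type names that are aliases of `String`:
```sql
-- alias_to is non-empty for alias types such as TEXT or VARCHAR.
SELECT name, case_insensitive
FROM system.data_type_families
WHERE alias_to = 'String'
LIMIT 5;
```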
View File
@ -7,7 +7,7 @@ keywords: [object, data type]
# Object Data Type (deprecated)
**This feature is not production-ready and is now deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
**This feature is not production-ready and is deprecated.** If you need to work with JSON documents, consider using [this guide](/docs/en/integrations/data-formats/json/overview) instead. A new implementation to support JSON object is in progress and can be tracked [here](https://github.com/ClickHouse/ClickHouse/issues/54864).
<hr />
View File
@ -58,10 +58,10 @@ SELECT json FROM test;
└───────────────────────────────────┘
```
Using CAST from 'String':
Using CAST from `String`:
```sql
SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json;
SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON AS json;
```
```text
@ -70,7 +70,47 @@ SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::JSON as json
└────────────────────────────────────────────────┘
```
CAST from `JSON`, named `Tuple`, `Map` and `Object('json')` to `JSON` type will be supported later.
Using CAST from `Tuple`:
```sql
SELECT (tuple(42 AS b) AS a, [1, 2, 3] AS c, 'Hello, World!' AS d)::JSON AS json;
```
```text
┌─json───────────────────────────────────────────┐
│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │
└────────────────────────────────────────────────┘
```
Using CAST from `Map`:
```sql
SELECT map('a', map('b', 42), 'c', [1,2,3], 'd', 'Hello, World!')::JSON AS json;
```
```text
┌─json───────────────────────────────────────────┐
│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │
└────────────────────────────────────────────────┘
```
Using CAST from deprecated `Object('json')`:
```sql
SELECT '{"a" : {"b" : 42},"c" : [1, 2, 3], "d" : "Hello, World!"}'::Object('json')::JSON AS json;
```
```text
┌─json───────────────────────────────────────────┐
│ {"a":{"b":42},"c":[1,2,3],"d":"Hello, World!"} │
└────────────────────────────────────────────────┘
```
:::note
CAST from `Tuple`/`Map`/`Object('json')` to `JSON` is implemented by serializing the column into a `String` column containing JSON objects and deserializing it back into a `JSON` type column.
:::
CAST between `JSON` types with different arguments will be supported later.
## Reading JSON paths as subcolumns
@ -630,6 +670,28 @@ SELECT arrayJoin(distinctJSONPathsAndTypes(json)) FROM s3('s3://clickhouse-publi
└─arrayJoin(distinctJSONPathsAndTypes(json))──────────────────┘
```
## ALTER MODIFY COLUMN to JSON type
It's possible to alter an existing table and change the type of the column to the new `JSON` type. Right now, only altering from the `String` type is supported.
**Example**
```sql
CREATE TABLE test (json String) ENGINE=MergeTree ORDER BY tuple();
INSERT INTO test VALUES ('{"a" : 42}'), ('{"a" : 43, "b" : "Hello"}'), ('{"a" : 44, "b" : [1, 2, 3]}'), ('{"c" : "2020-01-01"}');
ALTER TABLE test MODIFY COLUMN json JSON;
SELECT json, json.a, json.b, json.c FROM test;
```
```text
┌─json─────────────────────────┬─json.a─┬─json.b──┬─json.c─────┐
│ {"a":"42"} │ 42 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │
│ {"a":"43","b":"Hello"} │ 43 │ Hello │ ᴺᵁᴸᴸ │
│ {"a":"44","b":["1","2","3"]} │ 44 │ [1,2,3] │ ᴺᵁᴸᴸ │
│ {"c":"2020-01-01"} │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ 2020-01-01 │
└──────────────────────────────┴────────┴─────────┴────────────┘
```
## Tips for better usage of the JSON type
Before creating `JSON` column and loading data into it, consider the following tips:
View File
@ -5,7 +5,9 @@ sidebar_label: SimpleAggregateFunction
---
# SimpleAggregateFunction
`SimpleAggregateFunction(name, types_of_arguments...)` data type stores current value of the aggregate function, and does not store its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does. This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`. This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.
`SimpleAggregateFunction(name, types_of_arguments...)` data type stores the current value (intermediate state) of the aggregate function, but not its full state as [`AggregateFunction`](../../sql-reference/data-types/aggregatefunction.md) does.
This optimization can be applied to functions for which the following property holds: the result of applying a function `f` to a row set `S1 UNION ALL S2` can be obtained by applying `f` to parts of the row set separately, and then again applying `f` to the results: `f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2))`.
This property guarantees that partial aggregation results are enough to compute the combined one, so we do not have to store and process any extra data.
The common way to produce an aggregate function value is by calling the aggregate function with the [-SimpleState](../../sql-reference/aggregate-functions/combinators.md#agg-functions-combinator-simplestate) suffix.
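A minimal sketch (illustrative table and column names), storing a partially aggregated maximum:
```sql
-- max satisfies f(S1 UNION ALL S2) = f(f(S1) UNION ALL f(S2)),
-- so its partial result can live directly in the column.
CREATE TABLE simple_agg
(
    key UInt64,
    max_value SimpleAggregateFunction(max, UInt64)
)
ENGINE = AggregatingMergeTree ORDER BY key;

INSERT INTO simple_agg
SELECT number % 2 AS key, maxSimpleState(number)
FROM numbers(10)
GROUP BY key;

-- Plain max() is enough to combine the stored partial results.
SELECT key, max(max_value) FROM simple_agg GROUP BY key;
```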
View File
@ -441,6 +441,8 @@ SELECT v, variantType(v) FROM test ORDER by v;
└─────┴────────────────┘
```
**Note:** by default the `Variant` type is not allowed in `GROUP BY`/`ORDER BY` keys. If you want to use it, consider its special comparison rule and enable the `allow_suspicious_types_in_group_by`/`allow_suspicious_types_in_order_by` settings.
## JSONExtract functions with Variant
All `JSONExtract*` functions support `Variant` type:
View File
@ -4489,9 +4489,9 @@ Using replacement fields, you can define a pattern for the resulting string.
| k | clockhour of day (1~24) | number | 24 |
| m | minute of hour | number | 30 |
| s | second of minute | number | 55 |
| S | fraction of second (not supported yet) | number | 978 |
| z | time zone (short name not supported yet) | text | Pacific Standard Time; PST |
| Z | time zone offset/id (not supported yet) | zone | -0800; -08:00; America/Los_Angeles |
| S | fraction of second | number | 978 |
| z | time zone | text | Eastern Standard Time; EST |
| Z | time zone offset | zone | -0800; -0812 |
| ' | escape for text | delimiter | |
| '' | single quote | literal | ' |
@ -4773,7 +4773,7 @@ Result:
## toUTCTimestamp
Convert DateTime/DateTime64 type value from other time zone to UTC timezone timestamp
Converts a DateTime/DateTime64 type value from another time zone to a UTC time zone timestamp. This function is mainly included for compatibility with Apache Spark and similar frameworks.
**Syntax**
@ -4799,14 +4799,14 @@ SELECT toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai');
Result:
``` text
┌─toUTCTimestamp(toDateTime('2023-03-16'),'Asia/Shanghai')┐
┌─toUTCTimestamp(toDateTime('2023-03-16'), 'Asia/Shanghai')┐
│ 2023-03-15 16:00:00 │
└─────────────────────────────────────────────────────────┘
```
## fromUTCTimestamp
Convert DateTime/DateTime64 type value from UTC timezone to other time zone timestamp
Converts a DateTime/DateTime64 type value from the UTC time zone to a timestamp in another time zone. This function is mainly included for compatibility with Apache Spark and similar frameworks.
**Syntax**
@ -4832,7 +4832,7 @@ SELECT fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00', 3), 'Asia/Shanghai')
Result:
``` text
┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3),'Asia/Shanghai')─┐
┌─fromUTCTimestamp(toDateTime64('2023-03-16 10:00:00',3), 'Asia/Shanghai')─┐
│ 2023-03-16 18:00:00.000 │
└─────────────────────────────────────────────────────────────────────────┘
```
View File
@ -5,70 +5,4 @@ sidebar_position: 62
title: "Geo Functions"
---
## Geographical Coordinates Functions
- [greatCircleDistance](./coordinates.md#greatcircledistance)
- [geoDistance](./coordinates.md#geodistance)
- [greatCircleAngle](./coordinates.md#greatcircleangle)
- [pointInEllipses](./coordinates.md#pointinellipses)
- [pointInPolygon](./coordinates.md#pointinpolygon)
## Geohash Functions
- [geohashEncode](./geohash.md#geohashencode)
- [geohashDecode](./geohash.md#geohashdecode)
- [geohashesInBox](./geohash.md#geohashesinbox)
## H3 Indexes Functions
- [h3IsValid](./h3.md#h3isvalid)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3EdgeAngle](./h3.md#h3edgeangle)
- [h3EdgeLengthM](./h3.md#h3edgelengthm)
- [h3EdgeLengthKm](./h3.md#h3edgelengthkm)
- [geoToH3](./h3.md#geotoh3)
- [h3ToGeo](./h3.md#h3togeo)
- [h3ToGeoBoundary](./h3.md#h3togeoboundary)
- [h3kRing](./h3.md#h3kring)
- [h3GetBaseCell](./h3.md#h3getbasecell)
- [h3HexAreaM2](./h3.md#h3hexaream2)
- [h3HexAreaKm2](./h3.md#h3hexareakm2)
- [h3IndexesAreNeighbors](./h3.md#h3indexesareneighbors)
- [h3ToChildren](./h3.md#h3tochildren)
- [h3ToParent](./h3.md#h3toparent)
- [h3ToString](./h3.md#h3tostring)
- [stringToH3](./h3.md#stringtoh3)
- [h3GetResolution](./h3.md#h3getresolution)
- [h3IsResClassIII](./h3.md#h3isresclassiii)
- [h3IsPentagon](./h3.md#h3ispentagon)
- [h3GetFaces](./h3.md#h3getfaces)
- [h3CellAreaM2](./h3.md#h3cellaream2)
- [h3CellAreaRads2](./h3.md#h3cellarearads2)
- [h3ToCenterChild](./h3.md#h3tocenterchild)
- [h3ExactEdgeLengthM](./h3.md#h3exactedgelengthm)
- [h3ExactEdgeLengthKm](./h3.md#h3exactedgelengthkm)
- [h3ExactEdgeLengthRads](./h3.md#h3exactedgelengthrads)
- [h3NumHexagons](./h3.md#h3numhexagons)
- [h3Line](./h3.md#h3line)
- [h3Distance](./h3.md#h3distance)
- [h3HexRing](./h3.md#h3hexring)
- [h3GetUnidirectionalEdge](./h3.md#h3getunidirectionaledge)
- [h3UnidirectionalEdgeIsValid](./h3.md#h3unidirectionaledgeisvalid)
- [h3GetOriginIndexFromUnidirectionalEdge](./h3.md#h3getoriginindexfromunidirectionaledge)
- [h3GetDestinationIndexFromUnidirectionalEdge](./h3.md#h3getdestinationindexfromunidirectionaledge)
- [h3GetIndexesFromUnidirectionalEdge](./h3.md#h3getindexesfromunidirectionaledge)
- [h3GetUnidirectionalEdgesFromHexagon](./h3.md#h3getunidirectionaledgesfromhexagon)
- [h3GetUnidirectionalEdgeBoundary](./h3.md#h3getunidirectionaledgeboundary)
## S2 Index Functions
- [geoToS2](./s2.md#geotos2)
- [s2ToGeo](./s2.md#s2togeo)
- [s2GetNeighbors](./s2.md#s2getneighbors)
- [s2CellsIntersect](./s2.md#s2cellsintersect)
- [s2CapContains](./s2.md#s2capcontains)
- [s2CapUnion](./s2.md#s2capunion)
- [s2RectAdd](./s2.md#s2rectadd)
- [s2RectContains](./s2.md#s2rectcontains)
- [s2RectUnion](./s2.md#s2rectunion)
- [s2RectIntersection](./s2.md#s2rectintersection)
Functions for working with geometric objects, for example, [to calculate distances between points on a sphere](./coordinates.md), [compute geohashes](./geohash.md), and work with [H3 indexes](./h3.md).
View File
@ -24,7 +24,7 @@ All expressions in a query that have the same AST (the same record or same resul
## Types of Results
All functions return a single return as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.
All functions return a single value as the result (not several values, and not zero values). The type of result is usually defined only by the types of arguments, not by the values. Exceptions are the tupleElement function (the a.N operator), and the toFixedString function.
## Constants
View File
@ -6791,7 +6791,7 @@ parseDateTime(str[, format[, timezone]])
**Returned value(s)**
Returns DateTime values parsed from input string according to a MySQL style format string.
Returns a [DateTime](../data-types/datetime.md) value parsed from the input string according to a MySQL-style format string.
**Supported format specifiers**
@ -6840,7 +6840,7 @@ parseDateTimeInJodaSyntax(str[, format[, timezone]])
**Returned value(s)**
Returns DateTime values parsed from input string according to a Joda style format.
Returns a [DateTime](../data-types/datetime.md) value parsed from the input string according to a Joda-style format string.
**Supported format specifiers**
@ -6867,9 +6867,55 @@ Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that
Same as for [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax) except that it returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTime64
Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [MySQL format string](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-functions.html#function_date-format).
**Syntax**
``` sql
parseDateTime64(str[, format[, timezone]])
```
**Arguments**
- `str` — The String to be parsed.
- `format` — The format string. Optional. `%Y-%m-%d %H:%i:%s.%f` if not specified.
- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional.
**Returned value(s)**
Returns a [DateTime64](../data-types/datetime64.md) value parsed from the input string according to a MySQL-style format string.
The precision of the returned value is 6.
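For example, a sketch relying on the default format:
```sql
-- The default format is '%Y-%m-%d %H:%i:%s.%f'; the result has precision 6.
SELECT parseDateTime64('2024-11-19 17:58:45.123456');
```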
## parseDateTime64OrZero
Same as for [parseDateTime64](#parsedatetime64) except that it returns a zero date when it encounters a date format that cannot be processed.
## parseDateTime64OrNull
Same as for [parseDateTime64](#parsedatetime64) except that it returns `NULL` when it encounters a date format that cannot be processed.
## parseDateTime64InJodaSyntax
Similar to [parseDateTimeInJodaSyntax](#parsedatetimeinjodasyntax). Differently, it returns a value of type [DateTime64](../data-types/datetime64.md).
Converts a [String](../data-types/string.md) to [DateTime64](../data-types/datetime64.md) according to a [Joda format string](https://joda-time.sourceforge.net/apidocs/org/joda/time/format/DateTimeFormat.html).
**Syntax**
``` sql
parseDateTime64InJodaSyntax(str[, format[, timezone]])
```
**Arguments**
- `str` — The String to be parsed.
- `format` — The format string. Optional. `yyyy-MM-dd HH:mm:ss` if not specified.
- `timezone` — [Timezone](/docs/en/operations/server-configuration-parameters/settings.md#timezone). Optional.
**Returned value(s)**
Returns a [DateTime64](../data-types/datetime64.md) value parsed from the input string according to a Joda-style format string.
The precision of the returned value is equal to the number of `S` placeholders in the format string (but at most 6).
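For example (a sketch; the three `S` placeholders give precision 3):
```sql
SELECT parseDateTime64InJodaSyntax('2024-11-19 17:58:45.123', 'yyyy-MM-dd HH:mm:ss.SSS');
```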
## parseDateTime64InJodaSyntaxOrZero
View File
@ -279,7 +279,7 @@ For columns with a new or updated `MATERIALIZED` value expression, all existing
For columns with a new or updated `DEFAULT` value expression, the behavior depends on the ClickHouse version:
- In ClickHouse < v24.2, all existing rows are rewritten.
- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression.
- ClickHouse >= v24.2 distinguishes if a row value in a column with `DEFAULT` value expression was explicitly specified when it was inserted, or not, i.e. calculated from the `DEFAULT` value expression. If the value was explicitly specified, ClickHouse keeps it as is. If the value was calculated, ClickHouse changes it to the new or updated `MATERIALIZED` value expression.
Syntax:
View File
@ -0,0 +1,46 @@
---
slug: /en/sql-reference/statements/check-grant
sidebar_position: 56
sidebar_label: CHECK GRANT
title: "CHECK GRANT Statement"
---
The `CHECK GRANT` query is used to check whether the current user/role has been granted a specific privilege.
## Syntax
The basic syntax of the query is as follows:
```sql
CHECK GRANT privilege[(column_name [,...])] [,...] ON {db.table[*]|db[*].*|*.*|table[*]|*}
```
- `privilege` — Type of privilege.
## Examples
If the user has been granted the privilege, the response `check_grant` will be `1`. Otherwise, it will be `0`.
If `table_1.col1` exists and the current user is granted the `SELECT`/`SELECT(col1)` privilege (directly or via a role), the response is `1`.
```sql
CHECK GRANT SELECT(col1) ON table_1;
```
```text
┌─result─┐
│ 1 │
└────────┘
```
If `table_2.col2` doesn't exist, or the current user is not granted the `SELECT`/`SELECT(col2)` privilege (directly or via a role), the response is `0`.
```sql
CHECK GRANT SELECT(col2) ON table_2;
```
```text
┌─result─┐
│ 0 │
└────────┘
```
## Wildcard
When specifying privileges, you can use an asterisk (`*`) instead of a table or a database name. Please check [WILDCARD GRANTS](../../sql-reference/statements/grant.md#wildcard-grants) for the wildcard rules.
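For example, a sketch checking a database-wide privilege (`db1` is a hypothetical database):
```sql
-- Check whether SELECT is granted on every table of db1.
CHECK GRANT SELECT ON db1.*;
```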
View File
@ -6,16 +6,4 @@ sidebar_label: CREATE
# CREATE Queries
Create queries make a new entity of one of the following kinds:
- [DATABASE](/docs/en/sql-reference/statements/create/database.md)
- [TABLE](/docs/en/sql-reference/statements/create/table.md)
- [VIEW](/docs/en/sql-reference/statements/create/view.md)
- [DICTIONARY](/docs/en/sql-reference/statements/create/dictionary.md)
- [FUNCTION](/docs/en/sql-reference/statements/create/function.md)
- [USER](/docs/en/sql-reference/statements/create/user.md)
- [ROLE](/docs/en/sql-reference/statements/create/role.md)
- [ROW POLICY](/docs/en/sql-reference/statements/create/row-policy.md)
- [QUOTA](/docs/en/sql-reference/statements/create/quota.md)
- [SETTINGS PROFILE](/docs/en/sql-reference/statements/create/settings-profile.md)
- [NAMED COLLECTION](/docs/en/sql-reference/statements/create/named-collection.md)
CREATE queries create (for example) new [databases](/docs/en/sql-reference/statements/create/database.md), [tables](/docs/en/sql-reference/statements/create/table.md) and [views](/docs/en/sql-reference/statements/create/view.md).
View File
@ -161,6 +161,8 @@ Settings:
- `actions` — Prints detailed information about step actions. Default: 0.
- `json` — Prints query plan steps as a row in [JSON](../../interfaces/formats.md#json) format. Default: 0. It is recommended to use [TSVRaw](../../interfaces/formats.md#tabseparatedraw) format to avoid unnecessary escaping.
When `json=1`, step names will contain an additional suffix with a unique step identifier.
Example:
```sql
@ -194,30 +196,25 @@ EXPLAIN json = 1, description = 0 SELECT 1 UNION ALL SELECT 2 FORMAT TSVRaw;
{
"Plan": {
"Node Type": "Union",
"Node Id": "Union_10",
"Plans": [
{
"Node Type": "Expression",
"Node Id": "Expression_13",
"Plans": [
{
"Node Type": "SettingQuotaAndLimits",
"Plans": [
{
"Node Type": "ReadFromStorage"
}
]
"Node Type": "ReadFromStorage",
"Node Id": "ReadFromStorage_0"
}
]
},
{
"Node Type": "Expression",
"Node Id": "Expression_16",
"Plans": [
{
"Node Type": "SettingQuotaAndLimits",
"Plans": [
{
"Node Type": "ReadFromStorage"
}
]
"Node Type": "ReadFromStorage",
"Node Id": "ReadFromStorage_4"
}
]
}
@ -249,6 +246,7 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
{
"Plan": {
"Node Type": "Expression",
"Node Id": "Expression_5",
"Header": [
{
"Name": "1",
@ -261,23 +259,13 @@ EXPLAIN json = 1, description = 0, header = 1 SELECT 1, 2 + dummy;
],
"Plans": [
{
"Node Type": "SettingQuotaAndLimits",
"Node Type": "ReadFromStorage",
"Node Id": "ReadFromStorage_0",
"Header": [
{
"Name": "dummy",
"Type": "UInt8"
}
],
"Plans": [
{
"Node Type": "ReadFromStorage",
"Header": [
{
"Name": "dummy",
"Type": "UInt8"
}
]
}
]
}
]
@ -351,17 +339,31 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
{
"Plan": {
"Node Type": "Expression",
"Node Id": "Expression_5",
"Expression": {
"Inputs": [],
"Inputs": [
{
"Name": "dummy",
"Type": "UInt8"
}
],
"Actions": [
{
"Node Type": "Column",
"Node Type": "INPUT",
"Result Type": "UInt8",
"Result Type": "Column",
"Result Name": "dummy",
"Arguments": [0],
"Removed Arguments": [0],
"Result": 0
},
{
"Node Type": "COLUMN",
"Result Type": "UInt8",
"Result Name": "1",
"Column": "Const(UInt8)",
"Arguments": [],
"Removed Arguments": [],
"Result": 0
"Result": 1
}
],
"Outputs": [
@ -370,17 +372,12 @@ EXPLAIN json = 1, actions = 1, description = 0 SELECT 1 FORMAT TSVRaw;
"Type": "UInt8"
}
],
"Positions": [0],
"Project Input": true
"Positions": [1]
},
"Plans": [
{
"Node Type": "SettingQuotaAndLimits",
"Plans": [
{
"Node Type": "ReadFromStorage"
}
]
"Node Type": "ReadFromStorage",
"Node Id": "ReadFromStorage_0"
}
]
}
@ -396,6 +393,8 @@ Settings:
- `graph` — Prints a graph described in the [DOT](https://en.wikipedia.org/wiki/DOT_(graph_description_language)) graph description language. Default: 0.
- `compact` — Prints graph in compact mode if `graph` setting is enabled. Default: 1.
When `compact=0` and `graph=1`, processor names will contain an additional suffix with a unique processor identifier.
Example:
```sql
View File
@ -6,27 +6,4 @@ sidebar_label: List of statements
# ClickHouse SQL Statements
Statements represent various kinds of actions you can perform using SQL queries. Each kind of statement has its own syntax and usage details that are described separately:
- [SELECT](/docs/en/sql-reference/statements/select/index.md)
- [INSERT INTO](/docs/en/sql-reference/statements/insert-into.md)
- [CREATE](/docs/en/sql-reference/statements/create/index.md)
- [ALTER](/docs/en/sql-reference/statements/alter/index.md)
- [SYSTEM](/docs/en/sql-reference/statements/system.md)
- [SHOW](/docs/en/sql-reference/statements/show.md)
- [GRANT](/docs/en/sql-reference/statements/grant.md)
- [REVOKE](/docs/en/sql-reference/statements/revoke.md)
- [ATTACH](/docs/en/sql-reference/statements/attach.md)
- [CHECK TABLE](/docs/en/sql-reference/statements/check-table.md)
- [DESCRIBE TABLE](/docs/en/sql-reference/statements/describe-table.md)
- [DETACH](/docs/en/sql-reference/statements/detach.md)
- [DROP](/docs/en/sql-reference/statements/drop.md)
- [EXISTS](/docs/en/sql-reference/statements/exists.md)
- [KILL](/docs/en/sql-reference/statements/kill.md)
- [OPTIMIZE](/docs/en/sql-reference/statements/optimize.md)
- [RENAME](/docs/en/sql-reference/statements/rename.md)
- [SET](/docs/en/sql-reference/statements/set.md)
- [SET ROLE](/docs/en/sql-reference/statements/set-role.md)
- [TRUNCATE](/docs/en/sql-reference/statements/truncate.md)
- [USE](/docs/en/sql-reference/statements/use.md)
- [EXPLAIN](/docs/en/sql-reference/statements/explain.md)
Users interact with ClickHouse using SQL statements. ClickHouse supports common SQL statements like [SELECT](select/index.md) and [CREATE](create/index.md), but it also provides specialized statements like [KILL](kill.md) and [OPTIMIZE](optimize.md).
View File
@ -5,9 +5,14 @@ sidebar_label: EXCEPT
# EXCEPT Clause
The `EXCEPT` clause returns only those rows that result from the first query without the second. The queries must match the number of columns, order, and type. The result of `EXCEPT` can contain duplicate rows.
The `EXCEPT` clause returns only those rows that result from the first query without the second.
Multiple `EXCEPT` statements are executed left to right if parenthesis are not specified. The `EXCEPT` operator has the same priority as the `UNION` clause and lower priority than the `INTERSECT` clause.
- Both queries must have the same number of columns, in the same order and with the same data types.
- The result of `EXCEPT` can contain duplicate rows. Use `EXCEPT DISTINCT` if this is not desirable.
- Multiple `EXCEPT` statements are executed from left to right if parentheses are not specified.
- The `EXCEPT` operator has the same priority as the `UNION` clause and lower priority than the `INTERSECT` clause.
## Syntax
``` sql
SELECT column1 [, column2 ]
@ -19,18 +24,33 @@ EXCEPT
SELECT column1 [, column2 ]
FROM table2
[WHERE condition]
```
The condition could be any expression based on your requirements.
The condition could be any expression based on your requirements.
Additionally, `EXCEPT()` can be used to exclude columns from a result in the same table, as is possible with BigQuery (Google Cloud), using the following syntax:
```sql
SELECT column1 [, column2 ] EXCEPT (column3 [, column4])
FROM table1
[WHERE condition]
```
## Examples
The examples in this section demonstrate usage of the `EXCEPT` clause.
### Filtering Numbers Using the `EXCEPT` Clause
Here is a simple example that returns the numbers 1 to 10 that are _not_ a part of the numbers 3 to 8:
Query:
``` sql
SELECT number FROM numbers(1,10) EXCEPT SELECT number FROM numbers(3,6);
SELECT number
FROM numbers(1, 10)
EXCEPT
SELECT number
FROM numbers(3, 6)
```
Result:
@ -44,7 +64,53 @@ Result:
└────────┘
```
`EXCEPT` and `INTERSECT` can often be used interchangeably with different Boolean logic, and they are both useful if you have two tables that share a common column (or columns). For example, suppose we have a few million rows of historical cryptocurrency data that contains trade prices and volume:
### Excluding Specific Columns Using `EXCEPT()`
`EXCEPT()` can be used to quickly exclude columns from a result. For instance, if we want to select all columns from a table except a few specific columns, as shown in the example below:
Query:
```sql
SHOW COLUMNS IN system.settings
SELECT * EXCEPT (default, alias_for, readonly, description)
FROM system.settings
LIMIT 5
```
Result:
```response
┌─field───────┬─type─────────────────────────────────────────────────────────────────────┬─null─┬─key─┬─default─┬─extra─┐
1. │ alias_for │ String │ NO │ │ ᴺᵁᴸᴸ │ │
2. │ changed │ UInt8 │ NO │ │ ᴺᵁᴸᴸ │ │
3. │ default │ String │ NO │ │ ᴺᵁᴸᴸ │ │
4. │ description │ String │ NO │ │ ᴺᵁᴸᴸ │ │
5. │ is_obsolete │ UInt8 │ NO │ │ ᴺᵁᴸᴸ │ │
6. │ max │ Nullable(String) │ YES │ │ ᴺᵁᴸᴸ │ │
7. │ min │ Nullable(String) │ YES │ │ ᴺᵁᴸᴸ │ │
8. │ name │ String │ NO │ │ ᴺᵁᴸᴸ │ │
9. │ readonly │ UInt8 │ NO │ │ ᴺᵁᴸᴸ │ │
10. │ tier │ Enum8('Production' = 0, 'Obsolete' = 4, 'Experimental' = 8, 'Beta' = 12) │ NO │ │ ᴺᵁᴸᴸ │ │
11. │ type │ String │ NO │ │ ᴺᵁᴸᴸ │ │
12. │ value │ String │ NO │ │ ᴺᵁᴸᴸ │ │
└─────────────┴──────────────────────────────────────────────────────────────────────────┴──────┴─────┴─────────┴───────┘
┌─name────────────────────┬─value──────┬─changed─┬─min──┬─max──┬─type────┬─is_obsolete─┬─tier───────┐
1. │ dialect │ clickhouse │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Dialect │ 0 │ Production │
2. │ min_compress_block_size │ 65536 │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ UInt64 │ 0 │ Production │
3. │ max_compress_block_size │ 1048576 │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ UInt64 │ 0 │ Production │
4. │ max_block_size │ 65409 │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ UInt64 │ 0 │ Production │
5. │ max_insert_block_size │ 1048449 │ 0 │ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ UInt64 │ 0 │ Production │
└─────────────────────────┴────────────┴─────────┴──────┴──────┴─────────┴─────────────┴────────────┘
```
### Using `EXCEPT` and `INTERSECT` with Cryptocurrency Data
`EXCEPT` and `INTERSECT` can often be used interchangeably with different Boolean logic, and they are both useful if you have two tables that share a common column (or columns).
For example, suppose we have a few million rows of historical cryptocurrency data that contains trade prices and volume:
Query:
```sql
CREATE TABLE crypto_prices
@ -72,6 +138,8 @@ ORDER BY trade_date DESC
LIMIT 10;
```
Result:
```response
┌─trade_date─┬─crypto_name─┬──────volume─┬────price─┬───market_cap─┬──change_1_day─┐
│ 2020-11-02 │ Bitcoin │ 30771456000 │ 13550.49 │ 251119860000 │ -0.013585099 │
@ -127,7 +195,7 @@ Result:
This means of the four cryptocurrencies we own, only Bitcoin has never dropped below $10 (based on the limited data we have here in this example).
## EXCEPT DISTINCT
### Using `EXCEPT DISTINCT`
Notice in the previous query we had multiple Bitcoin holdings in the result. You can add `DISTINCT` to `EXCEPT` to eliminate duplicate rows from the result:
@ -146,7 +214,6 @@ Result:
└─────────────┘
```
**See Also**
- [UNION](union.md#union-clause)
View File
@ -291,7 +291,7 @@ All missed values of `expr` column will be filled sequentially and other columns
To fill multiple columns, add `WITH FILL` modifier with optional parameters after each field name in `ORDER BY` section.
``` sql
ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr]
ORDER BY expr [WITH FILL] [FROM const_expr] [TO const_expr] [STEP const_numeric_expr] [STALENESS const_numeric_expr], ... exprN [WITH FILL] [FROM expr] [TO expr] [STEP numeric_expr] [STALENESS numeric_expr]
[INTERPOLATE [(col [AS expr], ... colN [AS exprN])]]
```
@ -300,6 +300,7 @@ When `FROM const_expr` not defined sequence of filling use minimal `expr` field
When `TO const_expr` is not defined, the sequence of filling uses the maximum `expr` field value from `ORDER BY`.
When `STEP const_numeric_expr` is defined, `const_numeric_expr` is interpreted as-is for numeric types, as `days` for the Date type, and as `seconds` for the DateTime type. It also supports the [INTERVAL](https://clickhouse.com/docs/en/sql-reference/data-types/special-data-types/interval/) data type representing time and date intervals.
When `STEP const_numeric_expr` is omitted, the sequence of filling uses `1.0` for numeric types, `1 day` for the Date type, and `1 second` for the DateTime type.
When `STALENESS const_numeric_expr` is defined, the query will generate rows until the difference from the previous row in the original data exceeds `const_numeric_expr`.
`INTERPOLATE` can be applied to columns not participating in `ORDER BY WITH FILL`. Such columns are filled based on the previous fields' values by applying `expr`. If `expr` is not present, the previous value is repeated. An omitted list results in including all allowed columns.
Example of a query without `WITH FILL`:
@ -497,6 +498,64 @@ Result:
└────────────┴────────────┴──────────┘
```
Example of a query without `STALENESS`:
``` sql
SELECT number as key, 5 * number value, 'original' AS source
FROM numbers(16) WHERE key % 5 == 0
ORDER BY key WITH FILL;
```
Result:
``` text
┌─key─┬─value─┬─source───┐
1. │ 0 │ 0 │ original │
2. │ 1 │ 0 │ │
3. │ 2 │ 0 │ │
4. │ 3 │ 0 │ │
5. │ 4 │ 0 │ │
6. │ 5 │ 25 │ original │
7. │ 6 │ 0 │ │
8. │ 7 │ 0 │ │
9. │ 8 │ 0 │ │
10. │ 9 │ 0 │ │
11. │ 10 │ 50 │ original │
12. │ 11 │ 0 │ │
13. │ 12 │ 0 │ │
14. │ 13 │ 0 │ │
15. │ 14 │ 0 │ │
16. │ 15 │ 75 │ original │
└─────┴───────┴──────────┘
```
Same query after applying `STALENESS 3`:
``` sql
SELECT number as key, 5 * number value, 'original' AS source
FROM numbers(16) WHERE key % 5 == 0
ORDER BY key WITH FILL STALENESS 3;
```
Result:
``` text
┌─key─┬─value─┬─source───┐
1. │ 0 │ 0 │ original │
2. │ 1 │ 0 │ │
3. │ 2 │ 0 │ │
4. │ 5 │ 25 │ original │
5. │ 6 │ 0 │ │
6. │ 7 │ 0 │ │
7. │ 10 │ 50 │ original │
8. │ 11 │ 0 │ │
9. │ 12 │ 0 │ │
10. │ 15 │ 75 │ original │
11. │ 16 │ 0 │ │
12. │ 17 │ 0 │ │
└─────┴───────┴──────────┘
```
Example of a query without `INTERPOLATE`:
``` sql
View File
@ -15,7 +15,7 @@ first_value (column_name) [[RESPECT NULLS] | [IGNORE NULLS]]
OVER ([[PARTITION BY grouping_column] [ORDER BY sorting_column]
[ROWS or RANGE expression_to_bound_rows_within_the_group]] | [window_name])
FROM table_name
WINDOW window_name as ([[PARTITION BY grouping_column] [ORDER BY sorting_column])
WINDOW window_name as ([PARTITION BY grouping_column] [ORDER BY sorting_column])
```
Alias: `any`.
@ -23,6 +23,8 @@ Alias: `any`.
:::note
Using the optional modifier `RESPECT NULLS` after `first_value(column_name)` will ensure that `NULL` arguments are not skipped.
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
Alias: `firstValueRespectNulls`
:::
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
@ -48,7 +50,7 @@ CREATE TABLE salaries
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
INSERT INTO salaries FORMAT VALUES
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
View File
@ -23,6 +23,8 @@ Alias: `anyLast`.
:::note
Using the optional modifier `RESPECT NULLS` after `last_value(column_name)` will ensure that `NULL` arguments are not skipped.
See [NULL processing](../aggregate-functions/index.md/#null-processing) for more information.
Alias: `lastValueRespectNulls`
:::
For more detail on window function syntax see: [Window Functions - Syntax](./index.md/#syntax).
@ -33,7 +35,7 @@ For more detail on window function syntax see: [Window Functions - Syntax](./ind
**Example**
In this example the `last_value` function is used to find the highest paid footballer from a fictional dataset of salaries of Premier League football players.
In this example the `last_value` function is used to find the lowest paid footballer from a fictional dataset of salaries of Premier League football players.
Query:
@ -48,7 +50,7 @@ CREATE TABLE salaries
)
Engine = Memory;
INSERT INTO salaries FORMAT Values
INSERT INTO salaries FORMAT VALUES
('Port Elizabeth Barbarians', 'Gary Chen', 196000, 'F'),
('New Coreystad Archdukes', 'Charles Juarez', 190000, 'F'),
('Port Elizabeth Barbarians', 'Michael Stanley', 100000, 'D'),
View File
@ -0,0 +1,8 @@
---
sidebar_label: Invitations
title: Invitations
---
## List all invitations
This file is generated by `clickhouseapi.js` during the build process. If the content needs to be changed, edit `clickhouseapi.js`.
View File
@ -0,0 +1,9 @@
---
sidebar_label: Keys
title: Keys
---
## Get a list of all keys
This file is generated by `clickhouseapi.js` during the build process.
If the content needs to be changed, edit `clickhouseapi.js`.
View File
@ -0,0 +1,8 @@
---
sidebar_label: Members
title: Members
---
## List organization members
This file is generated by `clickhouseapi.js` during the build process. If the content needs to be changed, edit `clickhouseapi.js`.
View File
@ -0,0 +1,8 @@
---
sidebar_label: Organizations
title: Organizations
---
## Get organization details
This file is generated by `clickhouseapi.js` during the build process. If the content needs to be changed, edit `clickhouseapi.js`.
View File
@ -0,0 +1,8 @@
---
sidebar_label: Services
title: Services
---
## List of organization services
This file is generated by `clickhouseapi.js` during the build process. If the content needs to be changed, edit `clickhouseapi.js`.
View File
@ -0,0 +1,8 @@
---
slug: /ja/whats-new/changelog/
sidebar_position: 2
sidebar_label: 2024
title: 2024 Changelog
note: This file is automatically generated by `yarn new-build`.
---
View File
@ -0,0 +1,41 @@
<details><summary>Create GCS buckets and an HMAC key</summary>
### ch_bucket_us_east1
![Add a bucket](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-bucket-1.png)
### ch_bucket_us_east4
![Add a bucket](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-bucket-2.png)
### Generate an access key
### Create a service account HMAC key and secret
Open **Cloud Storage > Settings > Interoperability** and either select an existing **Access key**, or choose **CREATE A KEY FOR A SERVICE ACCOUNT**. This guide covers the steps for creating a new key for a new service account.
![Add a bucket](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-a-service-account-key.png)
### Add a new service account
If the project has no existing service account, click **CREATE NEW ACCOUNT**.
![Add a bucket](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-service-account-0.png)
There are three steps to creating a service account. In the first step, give the account a meaningful name, ID, and description.
![Add a bucket](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-service-account-a.png)
In the Interoperability settings dialog, the **Storage Object Admin** IAM role is recommended; select that role in step two.
![Add a bucket](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-service-account-2.png)
Step three is optional and not used in this guide. You may grant users these privileges based on your policies.
![Add a bucket](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-create-service-account-3.png)
The HMAC key for the service account will be displayed. Save this information, as it will be used in the ClickHouse configuration.
![Add a bucket](@site/docs/ja/integrations/data-ingestion/s3/images/GCS-guide-key.png)
</details>
View File
@ -0,0 +1,132 @@
<details><summary>Create an S3 bucket and an IAM user</summary>
This article demonstrates the basics of how to configure an AWS IAM user, create an S3 bucket, and configure ClickHouse to use the bucket as an S3 disk. You should work with your security team to determine the permissions to be used, and consider these as a starting point.
### Create an AWS IAM user
In this procedure, we'll be creating a service account user, not a login user.
1. Log into the AWS IAM Management Console.
2. In "Users", select **Add users**.
![create_iam_user_0](@site/docs/ja/_snippets/images/s3/s3-1.png)
3. Enter the user name, set the credential type to **Access key - Programmatic access**, and select **Next: Permissions**.
![create_iam_user_1](@site/docs/ja/_snippets/images/s3/s3-2.png)
4. Do not add the user to any group; select **Next: Tags**.
![create_iam_user_2](@site/docs/ja/_snippets/images/s3/s3-3.png)
5. Unless you need to add any tags, select **Next: Review**.
![create_iam_user_3](@site/docs/ja/_snippets/images/s3/s3-4.png)
6. Select **Create user**.
:::note
The warning message stating that the user has no permissions can be ignored; permissions will be granted on the bucket for the user in the next section.
:::
![create_iam_user_4](@site/docs/ja/_snippets/images/s3/s3-5.png)
7. The user is now created; click on **show** and copy the access and secret keys.
:::note
This is the only time that the secret access key will be available; save the keys somewhere else.
:::
![create_iam_user_5](@site/docs/ja/_snippets/images/s3/s3-6.png)
8. Click close, then find the user in the users screen.
![create_iam_user_6](@site/docs/ja/_snippets/images/s3/s3-7.png)
9. Copy the ARN (Amazon Resource Name) and save it for use when configuring the access policy for the bucket.
![create_iam_user_7](@site/docs/ja/_snippets/images/s3/s3-8.png)
### Create an S3 bucket
1. In the S3 bucket section, select **Create bucket**.
![create_s3_bucket_0](@site/docs/ja/_snippets/images/s3/s3-9.png)
2. Enter a bucket name; leave the other options at their defaults.
:::note
The bucket name must be unique across all of AWS, not just within the organization, or it will produce an error.
:::
3. Leave `Block all Public Access` enabled; public access is not needed.
![create_s3_bucket_2](@site/docs/ja/_snippets/images/s3/s3-a.png)
4. Select **Create bucket** at the bottom of the page.
![create_s3_bucket_3](@site/docs/ja/_snippets/images/s3/s3-b.png)
5. Select the link, copy the ARN, and save it for use when configuring the access policy for the bucket.
6. Once the bucket has been created, find the new S3 bucket in the S3 buckets list and select the link.
![create_s3_bucket_4](@site/docs/ja/_snippets/images/s3/s3-c.png)
7. Select **Create folder**.
![create_s3_bucket_5](@site/docs/ja/_snippets/images/s3/s3-d.png)
8. Enter the folder name that will be the target for the ClickHouse S3 disk and select **Create folder**.
![create_s3_bucket_6](@site/docs/ja/_snippets/images/s3/s3-e.png)
9. The folder should now be visible in the bucket list.
![create_s3_bucket_7](@site/docs/ja/_snippets/images/s3/s3-f.png)
10. Select the checkbox for the new folder and click **Copy URL**. The copied URL will be used in the ClickHouse storage configuration in the next section.
![create_s3_bucket_8](@site/docs/ja/_snippets/images/s3/s3-g.png)
11. Select the **Permissions** tab and click the **Edit** button in the **Bucket policy** section.
![create_s3_bucket_9](@site/docs/ja/_snippets/images/s3/s3-h.png)
12. Add a bucket policy like the example below:
```json
{
"Version": "2012-10-17",
"Id": "Policy123456",
"Statement": [
{
"Sid": "abc123",
"Effect": "Allow",
"Principal": {
"AWS": "arn:aws:iam::921234567898:user/mars-s3-user"
},
"Action": "s3:*",
"Resource": [
"arn:aws:s3:::mars-doc-test",
"arn:aws:s3:::mars-doc-test/*"
]
}
]
}
```
```response
|Parameter | Description | Example |
|----------|-------------|----------------|
|Version | Version of the policy interpreter, leave as-is | 2012-10-17 |
|Sid | User-defined policy ID | abc123 |
|Effect | Whether user requests will be allowed or denied | Allow |
|Principal | The accounts or user to be allowed | arn:aws:iam::921234567898:user/mars-s3-user |
|Action | What operations are allowed on the bucket | s3:* |
|Resource | Which resources in the bucket the operations are allowed on | "arn:aws:s3:::mars-doc-test", "arn:aws:s3:::mars-doc-test/*" |
```
:::note
You should work with your security team to determine the permissions to be used; consider these as a starting point.
See the AWS documentation on policies and settings:
https://docs.aws.amazon.com/AmazonS3/latest/userguide/access-policy-language-overview.html
:::
13. Save the policy configuration.
</details>
View File
@ -0,0 +1,11 @@
<details><summary>Manage your IP Access List</summary>
From your ClickHouse Cloud services list, choose the service that you will work with and switch to **Security**. If the IP Access List does not contain the IP address or range of the remote system that needs to connect to your ClickHouse Cloud service, you can resolve the problem with **Add entry**.
![Check that the service allows traffic](@site/docs/ja/_snippets/images/ip-allow-list-check-list.png)
Add the individual IP address, or the range of addresses, that needs to connect to your ClickHouse Cloud service. Modify the form as you see fit, then **Add entry** and **Submit entries**.
![Add current IP address](@site/docs/ja/_snippets/images/ip-allow-list-add-current-ip.png)
</details>
View File
@ -0,0 +1,45 @@
<details><summary>Launch Apache Superset in Docker</summary>
Superset provides instructions for [installing Superset locally using Docker Compose](https://superset.apache.org/docs/installation/installing-superset-using-docker-compose/). After checking out the Apache Superset repo from GitHub, you can run the latest development code or a specific tag. We recommend release 2.0.0, as it is the latest release not marked as `pre-release`.
There are a few tasks to be done before running `docker compose`:
1. Add the official ClickHouse Connect driver
2. Get a MapBox API key and add it as an environment variable (optional)
3. Specify the version of Superset to run
:::tip
The commands below are to be run from the top level of the GitHub repo, `superset`.
:::
## Official ClickHouse Connect driver
To make the ClickHouse Connect driver available in the Superset deployment, add it to the local requirements file:
```bash
echo "clickhouse-connect" >> ./docker/requirements-local.txt
```
## MapBox
This is optional; you can plot location data in Superset without a MapBox API key, but you will see a message telling you that you should add a key, and the background image of the map will be missing (you will only see the data points and not the map background). MapBox provides a free tier if you would like to use it.
Some of the sample visualizations that the guides create use location data, for example longitude and latitude. Superset includes support for MapBox maps. To use the MapBox visualizations you need a MapBox API key. Sign up for the [MapBox free tier](https://account.mapbox.com/auth/signup/) and generate an API key.
Make the API key available to Superset:
```bash
echo "MAPBOX_API_KEY=pk.SAMPLE-Use-your-key-instead" >> docker/.env-non-dev
```
## Deploy Superset version 2.0.0
To deploy release 2.0.0, run:
```bash
git checkout 2.0.0
TAG=2.0.0 docker-compose -f docker-compose-non-dev.yml pull
TAG=2.0.0 docker-compose -f docker-compose-non-dev.yml up
```
</details>
View File
@ -0,0 +1,11 @@
| Region | VPC Service Name | Availability Zone IDs |
|------------------|--------------------------------------------------------------------|------------------------------|
|ap-south-1 | com.amazonaws.vpce.ap-south-1.vpce-svc-0a786406c7ddc3a1b | aps1-az1 aps1-az2 aps1-az3 |
|ap-southeast-1 | com.amazonaws.vpce.ap-southeast-1.vpce-svc-0a8b096ec9d2acb01 | apse1-az1 apse1-az2 apse1-az3|
|ap-southeast-2 | com.amazonaws.vpce.ap-southeast-2.vpce-svc-0ca446409b23f0c01 | apse2-az1 apse2-az2 apse2-az3|
|eu-central-1 | com.amazonaws.vpce.eu-central-1.vpce-svc-0536fc4b80a82b8ed | euc1-az2 euc1-az3 euc1-az1 |
|eu-west-1 | com.amazonaws.vpce.eu-west-1.vpce-svc-066b03c9b5f61c6fc | euw1-az2 euw1-az3 euw1-az1 |
|us-east-1 c0 | com.amazonaws.vpce.us-east-1.vpce-svc-0a0218fa75c646d81 | use1-az6 use1-az1 use1-az2 |
|us-east-1 c1 | com.amazonaws.vpce.us-east-1.vpce-svc-096c118db1ff20ea4 | use1-az6 use1-az4 use1-az2 |
|us-east-2 | com.amazonaws.vpce.us-east-2.vpce-svc-0b99748bf269a86b4 | use2-az1 use2-az2 use2-az3 |
|us-west-2 | com.amazonaws.vpce.us-west-2.vpce-svc-049bbd33f61271781 | usw2-az2 usw2-az1 usw2-az3 |
View File
@ -0,0 +1,15 @@
<details><summary>Manage your IP Access List</summary>
From your ClickHouse Cloud services list, choose the service that you will work with and switch to **Settings**.
![Settings for the service](@site/docs/ja/_snippets/images/cloud-service-settings.png)
If the IP Access List reads **There is currently no traffic allowed to this service**, you can resolve the problem with **Add entry**.
![Check that the service allows traffic](@site/docs/ja/_snippets/images/ip-allow-list-check-list.png)
For the quick start, if your local security policy allows it, you can add only your current IP address. To do so, use **Add current IP**, which autofills the form with your current IP address and the description "Home IP". Modify the form as needed, then **Add entry** and **Submit entries**.
![Add your current IP address](@site/docs/ja/_snippets/images/ip-allow-list-add-current-ip.png)
</details>
View File
@ -0,0 +1,61 @@
1. ClickHouse Cloud Serviceを作成した後、認証情報画面でMySQLタブを選択します。
![Credentials screen - Prompt](./images/mysql1.png)
2. この特定のサービスに対してMySQLインターフェースを有効にするためにスイッチを切り替えます。これにより、そのサービスでポート`3306`が公開され、ユニークなMySQLユーザー名を含むMySQL接続画面が表示されます。パスワードはサービスのデフォルトユーザーのパスワードと同じになります。
![Credentials screen - Enabled MySQL](./images/mysql2.png)
Alternatively, in order to enable the MySQL interface for an existing service:
3. Ensure your service is in the `Running` state, then click the "View connection string" button for the service you want to enable the MySQL interface for.
![Connection screen - Prompt MySQL](./images/mysql3.png)
4. Toggle the switch to enable the MySQL interface for this specific service. This will prompt you to enter the default password.
![Connection screen - Prompt MySQL](./images/mysql4.png)
5. After entering the password, you will be shown the MySQL connection string for this service.
![Connection screen - MySQL Enabled](./images/mysql5.png)
## Creating multiple MySQL users in ClickHouse Cloud
By default, there is a built-in `mysql4<subdomain>` user, which uses the same password as the `default` user. The `<subdomain>` part is the first segment of your ClickHouse Cloud hostname. This format is necessary to work with tools that implement secure connections but [do not provide SNI information in their TLS handshake](https://www.cloudflare.com/learning/ssl/what-is-sni) (the MySQL console client is one of them), making it impossible to do internal routing without an extra hint in the username.
Because of this, we _strongly recommend_ following the `mysql4<subdomain>_<username>` format when creating a new user intended for the MySQL interface, where `<subdomain>` is a hint to identify your Cloud service and `<username>` is an arbitrary suffix of your choice.
:::tip
For the ClickHouse Cloud hostname `foobar.us-east1.aws.clickhouse.cloud`, the `<subdomain>` part equals `foobar`, and a custom MySQL username could look like `mysql4foobar_team1`.
:::
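If helpful, the `<subdomain>` hint can be derived mechanically from the hostname. A small sketch, reusing the hypothetical hostname from the tip above:
```bash
# Strip everything after the first dot to get the <subdomain> hint.
HOST=foobar.us-east1.aws.clickhouse.cloud
echo "mysql4${HOST%%.*}_team1"   # -> mysql4foobar_team1
```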
You can create additional users to use with the MySQL interface if, for example, you need to apply extra settings.
1. Optional - create a [settings profile](https://clickhouse.com/docs/ja/sql-reference/statements/create/settings-profile) to apply to your custom user. For example, `my_custom_profile` with an extra setting which will be applied by default when we connect with the user we create later:
```sql
CREATE SETTINGS PROFILE my_custom_profile SETTINGS prefer_column_name_to_alias=1;
```
`prefer_column_name_to_alias` is used just as an example; you can use other settings there.
2. [Create a user](https://clickhouse.com/docs/ja/sql-reference/statements/create/user) using the following format: `mysql4<subdomain>_<username>` ([see above](#creating-multiple-mysql-users-in-clickhouse-cloud)). The password must be in double SHA1 format. For example:
```sql
CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$';
```
Or, if you want to use a custom profile for this user:
```sql
CREATE USER mysql4foobar_team1 IDENTIFIED WITH double_sha1_password BY 'YourPassword42$' SETTINGS PROFILE 'my_custom_profile';
```
where `my_custom_profile` is the name of the profile created earlier.
3. [Grant](https://clickhouse.com/docs/ja/sql-reference/statements/grant) the new user the necessary permissions to interact with the desired tables or databases. For example, if you want to grant access to `system.query_log` only:
```sql
GRANT SELECT ON system.query_log TO mysql4foobar_team1;
```
4. Use the created user to connect to your ClickHouse Cloud service over the MySQL interface, as shown below.
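A minimal sketch of the connection, reusing the hypothetical hostname and username from the tip above; you will be prompted for the user's password:
```bash
# Connect over the MySQL protocol on port 3306 (hostname and user are hypothetical).
mysql -h foobar.us-east1.aws.clickhouse.cloud -P 3306 -u mysql4foobar_team1 -p
```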
### Troubleshooting multiple MySQL users in ClickHouse Cloud
If you created a new MySQL user and see the following error while connecting via the MySQL CLI client:
```
ERROR 2013 (HY000): Lost connection to MySQL server at 'reading authorization packet', system error: 54
```
then ensure that the username follows the `mysql4<subdomain>_<username>` format, as described [above](#creating-multiple-mysql-users-in-clickhouse-cloud).
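For illustration, a sketch of the contrast (hostname and usernames hypothetical):
```bash
# Fails: no mysql4<subdomain> hint, so the connection cannot be routed.
mysql -h foobar.us-east1.aws.clickhouse.cloud -P 3306 -u team1 -p
# Works: the mysql4foobar_ prefix identifies the target service.
mysql -h foobar.us-east1.aws.clickhouse.cloud -P 3306 -u mysql4foobar_team1 -p
```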

View File

@ -0,0 +1,87 @@
Please refer to [the official documentation](https://clickhouse.com/docs/ja/interfaces/mysql) on how to set up a ClickHouse server with an enabled MySQL interface.
Apart from adding an entry to the server's `config.xml`,
```xml
<clickhouse>
<mysql_port>9004</mysql_port>
</clickhouse>
```
it is **required** to use [double SHA1 password encryption](https://clickhouse.com/docs/ja/operations/settings/settings-users#user-namepassword) for the users that will be using the MySQL interface.
To generate a random password encrypted with double SHA1 from the shell, run:
```shell
PASSWORD=$(base64 < /dev/urandom | head -c16); echo "$PASSWORD"; echo -n "$PASSWORD" | sha1sum | tr -d '-' | xxd -r -p | sha1sum | tr -d '-'
```
The output should look like the following:
```
LZOQYnqQN4L/T6L0
fbc958cc745a82188a51f30de69eebfc67c40ee4
```
The first line is the generated password, and the second line is the hash to use when configuring ClickHouse.
Here is an example `mysql_user` configuration that uses the generated hash:
`/etc/clickhouse-server/users.d/mysql_user.xml`
```xml
<users>
<mysql_user>
<password_double_sha1_hex>fbc958cc745a82188a51f30de69eebfc67c40ee4</password_double_sha1_hex>
<networks>
<ip>::/0</ip>
</networks>
<profile>default</profile>
<quota>default</quota>
</mysql_user>
</users>
```
Replace the `password_double_sha1_hex` entry with your own generated double SHA1 hash.
Additionally, it is recommended to use `use_mysql_types_in_show_columns` to show native MySQL types in the results of `SHOW [FULL] COLUMNS` queries, so that BI tools can properly introspect the database schema when using MySQL connectors.
For example:
`/etc/clickhouse-server/users.d/mysql_user.xml`
```xml
<profiles>
<default>
<use_mysql_types_in_show_columns>1</use_mysql_types_in_show_columns>
</default>
</profiles>
```
Alternatively, you can assign it to a different profile instead of the default one.
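For instance, a sketch that scopes the setting to a hypothetical `mysql` profile, which the user definition above would then reference via its `<profile>` element:
```xml
<!-- Hypothetical profile name; reference it as <profile>mysql</profile>
     in the mysql_user definition instead of default. -->
<profiles>
    <mysql>
        <use_mysql_types_in_show_columns>1</use_mysql_types_in_show_columns>
    </mysql>
</profiles>
```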
If the `mysql` binary is available, you can test the connection from the command line. Here is an example command using the sample username (`mysql_user`) and password (`LZOQYnqQN4L/T6L0`) created above:
```bash
mysql --protocol tcp -h localhost -u mysql_user -P 9004 --password=LZOQYnqQN4L/T6L0
```
```
mysql> show databases;
+--------------------+
| name |
+--------------------+
| INFORMATION_SCHEMA |
| default |
| information_schema |
| system |
+--------------------+
4 rows in set (0.00 sec)
Read 4 rows, 603.00 B in 0.00156 sec., 2564 rows/sec., 377.48 KiB/sec.
```
Finally, configure the ClickHouse server to listen on the desired IP address(es). For example, in `config.xml`, uncomment the following to listen on all addresses:
```xml
<listen_host>::</listen_host>
```

View File

@ -0,0 +1,19 @@
## Cloud backup and restore
Each service is backed up daily. You can see the list of backups on the **Backups** tab of the service. From there you can restore a backup, or delete a backup.
![List of backups](@site/docs/ja/_snippets/images/cloud-backup-list.png)
Clicking the **Restore backup** icon lets you specify the **Service name** of the new service that will be created, and then **Restore this backup**.
![Restore a backup](@site/docs/ja/_snippets/images/cloud-backup-restore.png)
The new service will show in the services list as **Provisioning** until it is ready.
![New service provisioning](@site/docs/ja/_snippets/images/cloud-backup-new-service.png)
Once the new service finishes provisioning you can connect to it. After that…
:::note
Do not use the `BACKUP` and `RESTORE` commands from a SQL client when working with ClickHouse Cloud services. Cloud backups must be managed from the UI.
:::

Some files were not shown because too many files have changed in this diff