Merge branch 'master' into fix-preadv2-with-nowait

This commit is contained in:
Antonio Andelic 2024-10-15 15:31:23 +02:00
commit d8af90be5c
1268 changed files with 35836 additions and 19722 deletions


@ -16,3 +16,6 @@
# Applied Black formatter for Python code
e6f5a3f98b21ba99cf274a9833797889e020a2b3
# Enabling clang-tidy readability-else-no-return rule
67c1e89d90ef576e62f8b1c68269742a3c6f9b1e


@ -15,7 +15,7 @@ assignees: ''
**Use case**
> A clear and concise description of what is the intended usage scenario is.
> A clear and concise description of what the intended usage scenario is.
**Describe the solution you'd like**


@ -4,7 +4,6 @@ self-hosted-runner:
- func-tester
- func-tester-aarch64
- fuzzer-unit-tester
- stress-tester
- style-checker
- style-checker-aarch64
- release-maker


@ -229,18 +229,26 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
#############################################################################################
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
IntegrationTestsAsanOldAnalyzer:
needs: [RunConfig, BuilderDebAsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
test_name: Integration tests (asan, old analyzer)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [RunConfig, BuilderDebTsan]
if: ${{ !failure() && !cancelled() }}
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !cancelled() }}
@ -250,7 +258,8 @@ jobs:
- FunctionalStatelessTestAsan
- FunctionalStatefulTestDebug
- StressTestTsan
- IntegrationTestsRelease
- IntegrationTestsTsan
- IntegrationTestsAsanOldAnalyzer
- CompatibilityCheckX86
- CompatibilityCheckAarch64
runs-on: [self-hosted, style-checker]


@ -33,9 +33,6 @@ jobs:
filter: tree:0
- name: Debug Info
uses: ./.github/actions/debug
- name: Cancel previous Sync PR workflow
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
- name: Set pending Sync status
run: |
python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --set-pending-status


@ -374,7 +374,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (asan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestTsan:
needs: [RunConfig, BuilderDebTsan]
@ -382,7 +382,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestMsan:
needs: [RunConfig, BuilderDebMsan]
@ -390,7 +390,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (msan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestUBsan:
needs: [RunConfig, BuilderDebUBsan]
@ -398,7 +398,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (ubsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
StressTestDebug:
needs: [RunConfig, BuilderDebDebug]
@ -406,7 +406,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Stress test (debug)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
#############################################################################################
############################# INTEGRATION TESTS #############################################
@ -417,7 +417,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsAnalyzerAsan:
needs: [RunConfig, BuilderDebAsan]
@ -425,7 +425,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (asan, old analyzer)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsTsan:
needs: [RunConfig, BuilderDebTsan]
@ -433,7 +433,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (tsan)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
IntegrationTestsRelease:
needs: [RunConfig, BuilderDebRelease]
@ -441,7 +441,7 @@ jobs:
uses: ./.github/workflows/reusable_test.yml
with:
test_name: Integration tests (release)
runner_type: stress-tester
runner_type: func-tester
data: ${{ needs.RunConfig.outputs.data }}
FinishCheck:
if: ${{ !cancelled() }}


@ -339,7 +339,6 @@ set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3
set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
if (OS_DARWIN)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
endif()


@ -110,8 +110,7 @@ struct DecomposedFloat
{
if (!isNegative())
return rhs > 0 ? -1 : 1;
else
return rhs >= 0 ? -1 : 1;
return rhs >= 0 ? -1 : 1;
}
/// The case of the most negative integer
@ -128,8 +127,7 @@ struct DecomposedFloat
if (mantissa() == 0)
return 0;
else
return -1;
return -1;
}
}
@ -169,9 +167,8 @@ struct DecomposedFloat
/// Float has no fractional part means that the numbers are equal.
if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)
return 0;
else
/// Float has fractional part means its abs value is larger.
return isNegative() ? -1 : 1;
/// Float has fractional part means its abs value is larger.
return isNegative() ? -1 : 1;
}


@ -205,8 +205,7 @@ JSON::ElementType JSON::getType() const
Pos after_string = skipString();
if (after_string < ptr_end && *after_string == ':')
return TYPE_NAME_VALUE_PAIR;
else
return TYPE_STRING;
return TYPE_STRING;
}
default:
throw JSONException(std::string("JSON: unexpected char ") + *ptr_begin + ", expected one of '{[tfn-0123456789\"'");
@ -474,8 +473,7 @@ JSON::Pos JSON::searchField(const char * data, size_t size) const
if (it == end())
return nullptr;
else
return it->data();
return it->data();
}
@ -487,7 +485,7 @@ bool JSON::hasEscapes() const
if (*pos == '"')
return false;
else if (*pos == '\\')
if (*pos == '\\')
return true;
throw JSONException("JSON: unexpected end of data.");
}
@ -503,7 +501,7 @@ bool JSON::hasSpecialChars() const
if (*pos == '"')
return false;
else if (pos < ptr_end)
if (pos < ptr_end)
return true;
throw JSONException("JSON: unexpected end of data.");
}
@ -682,10 +680,9 @@ double JSON::toDouble() const
if (type == TYPE_NUMBER)
return getDouble();
else if (type == TYPE_STRING)
if (type == TYPE_STRING)
return JSON(ptr_begin + 1, ptr_end, level + 1).getDouble();
else
throw JSONException("JSON: cannot convert value to double.");
throw JSONException("JSON: cannot convert value to double.");
}
Int64 JSON::toInt() const
@ -694,10 +691,9 @@ Int64 JSON::toInt() const
if (type == TYPE_NUMBER)
return getInt();
else if (type == TYPE_STRING)
if (type == TYPE_STRING)
return JSON(ptr_begin + 1, ptr_end, level + 1).getInt();
else
throw JSONException("JSON: cannot convert value to signed integer.");
throw JSONException("JSON: cannot convert value to signed integer.");
}
UInt64 JSON::toUInt() const
@ -706,10 +702,9 @@ UInt64 JSON::toUInt() const
if (type == TYPE_NUMBER)
return getUInt();
else if (type == TYPE_STRING)
if (type == TYPE_STRING)
return JSON(ptr_begin + 1, ptr_end, level + 1).getUInt();
else
throw JSONException("JSON: cannot convert value to unsigned integer.");
throw JSONException("JSON: cannot convert value to unsigned integer.");
}
std::string JSON::toString() const
@ -718,11 +713,9 @@ std::string JSON::toString() const
if (type == TYPE_STRING)
return getString();
else
{
Pos pos = skipElement();
return std::string(ptr_begin, pos - ptr_begin);
}
Pos pos = skipElement();
return std::string(ptr_begin, pos - ptr_begin);
}


@ -203,9 +203,7 @@ T JSON::getWithDefault(const std::string & key, const T & default_) const
if (key_json.isType<T>())
return key_json.get<T>();
else
return default_;
}
else
return default_;
}
return default_;
}


@ -151,19 +151,19 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size)
return unalignedLoad<uint64_t>(p1) == unalignedLoad<uint64_t>(p2)
&& unalignedLoad<uint64_t>(p1 + size - 8) == unalignedLoad<uint64_t>(p2 + size - 8);
}
else if (size >= 4)
if (size >= 4)
{
/// Chunks of 4..7 bytes.
return unalignedLoad<uint32_t>(p1) == unalignedLoad<uint32_t>(p2)
&& unalignedLoad<uint32_t>(p1 + size - 4) == unalignedLoad<uint32_t>(p2 + size - 4);
}
else if (size >= 2)
if (size >= 2)
{
/// Chunks of 2..3 bytes.
return unalignedLoad<uint16_t>(p1) == unalignedLoad<uint16_t>(p2)
&& unalignedLoad<uint16_t>(p1 + size - 2) == unalignedLoad<uint16_t>(p2 + size - 2);
}
else if (size >= 1)
if (size >= 1)
{
/// A single byte.
return *p1 == *p2;


@ -53,10 +53,9 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv,
key = arg.substr(key_start);
continue;
}
else
{
key = "";
}
key = "";
if (key_start == std::string::npos)
continue;


@ -330,9 +330,8 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char *
#if defined(__SSE4_2__)
if (sizeof...(symbols) >= 5)
return find_first_symbols_sse42<positive, return_mode, sizeof...(symbols), symbols...>(begin, end);
else
#endif
return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
}
template <bool positive, ReturnMode return_mode>
@ -341,9 +340,8 @@ inline const char * find_first_symbols_dispatch(const std::string_view haystack,
#if defined(__SSE4_2__)
if (symbols.str.size() >= 5)
return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols);
else
#endif
return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
}
}


@ -33,8 +33,7 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
uint64_t value;
if (setting_file >> value)
return {value};
else
return {}; /// e.g. the cgroups default "max"
return {}; /// e.g. the cgroups default "max"
}
current_cgroup = current_cgroup.parent_path();
}


@ -1420,8 +1420,6 @@ config
configs
conformant
congruential
conjuction
conjuctive
connectionId
const
contrib
@ -1698,7 +1696,6 @@ formatReadableSize
formatReadableTimeDelta
formatRow
formatRowNoNewline
formated
formatschema
formatter
formatters
@ -3048,3 +3045,89 @@ znode
znodes
zookeeperSessionUptime
zstd
ArrowCompression
CapnProtoEnumComparingMode
DateTimeInputFormat
DateTimeOutputFormat
DateTimeOverflowBehavior
deserialize
dotall
EachRow
EscapingRule
IdentifierQuotingRule
IdentifierQuotingStyle
IntervalOutputFormat
MsgPackUUIDRepresentation
ORCCompression
ParquetCompression
ParquetVersion
SchemaInferenceMode
alloc
CacheWarmer
conjuctive
cors
CORS
countIf
DefaultTableEngine
dereference
DistributedDDLOutputMode
DistributedProductMode
formatdatetime
inequal
INVOKER
ITION
JoinAlgorithm
JoinStrictness
keepalive
ListObject
ListObjects
LoadBalancing
LocalFSReadMethod
LogQueriesType
LogsLevel
MaxThreads
MemorySample
multibuffer
multiif
multiread
multithreading
MySQLDataTypesSupport
nonconst
NonZeroUInt
nullptr
OverflowMode
OverflowModeGroupBy
ParallelReplicasMode
param
parsedatetime
perf
PerfEventInfo
perkey
prefetched
prefetches
prefetching
preimage
QueryCacheNondeterministicFunctionHandling
QueryCacheSystemTableHandling
remerge
replcase
rerange
RetryStrategy
rowlist
SetOperationMode
ShortCircuitFunctionEvaluation
SQLSecurityType
sumIf
TCPHandler
throwif
TotalsMode
TransactionsWaitCSNMode
undelete
unmerged
DataPacket
DDLs
DistributedCacheLogMode
DistributedCachePoolBehaviourOnLimit
SharedJoin
ShareSet
unacked


@ -11,6 +11,38 @@ option (ARCH_NATIVE "Add -march=native compiler flag. This makes your binaries n
if (ARCH_NATIVE)
set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
# Populate the ENABLE_ option flags. This is required for the build of some third-party dependencies, specifically snappy, which
# (somewhat weirdly) expects the corresponding SNAPPY_HAVE_ preprocessor variables to be populated, in addition to the microarchitecture
# feature flags being enabled in the compiler. This fixes the ARCH_NATIVE flag by automatically populating the ENABLE_ option flags
# according to the current CPU's capabilities, detected using clang.
if (ARCH_AMD64)
execute_process(
COMMAND sh -c "clang -E - -march=native -###"
INPUT_FILE /dev/null
OUTPUT_QUIET
ERROR_VARIABLE TEST_FEATURE_RESULT)
macro(TEST_AMD64_FEATURE TEST_FEATURE_RESULT feat flag)
if (${TEST_FEATURE_RESULT} MATCHES "\"\\+${feat}\"")
set(${flag} ON)
else ()
set(${flag} OFF)
endif ()
endmacro()
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} ssse3 ENABLE_SSSE3)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} sse4.1 ENABLE_SSE41)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} sse4.2 ENABLE_SSE42)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} vpclmulqdq ENABLE_PCLMULQDQ)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} popcnt ENABLE_POPCNT)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx ENABLE_AVX)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx2 ENABLE_AVX2)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx512f ENABLE_AVX512)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx512vbmi ENABLE_AVX512_VBMI)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} bmi ENABLE_BMI)
TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} bmi2 ENABLE_BMI2)
endif ()
elseif (ARCH_AARCH64)
# ARM publishes a new revision of its ISA [1] almost every year. Each version comes with new mandatory and optional features from
# which CPU vendors can pick and choose. This creates a lot of variability ... We provide two build "profiles", one for maximum


@ -48,6 +48,8 @@ if (NOT LINKER_NAME)
find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")
elseif (OS_DARWIN)
find_program (LLD_PATH NAMES "ld")
# Duplicate libraries passed to the linker is not a problem.
set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-no_warn_duplicate_libraries")
endif ()
if (LLD_PATH)
if (OS_LINUX OR OS_DARWIN)

contrib/grpc vendored

@ -1 +1 @@
Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e
Subproject commit 62e871c36fa93c0af939bd31762845265214fe3d

contrib/libdivide vendored

@ -1 +1 @@
Subproject commit 3bd34388573681ce563348cdf04fe15d24770d04
Subproject commit 01526031eb79375dc85e0212c966d2c514a01234

contrib/simdjson vendored

@ -1 +1 @@
Subproject commit 6060be2fdf62edf4a8f51a8b0883d57d09397b30
Subproject commit e341c8b43861b43de29c48ab65f292d997096953


@ -0,0 +1 @@
[rabbitmq_consistent_hash_exchange].


@ -13,3 +13,5 @@ ssl_options.fail_if_no_peer_cert = false
ssl_options.cacertfile = /etc/rabbitmq/ca-cert.pem
ssl_options.certfile = /etc/rabbitmq/server-cert.pem
ssl_options.keyfile = /etc/rabbitmq/server-key.pem
vm_memory_high_watermark.absolute = 2GB


@ -41,7 +41,7 @@ sidebar_label: 2022
* Backported in [#25364](https://github.com/ClickHouse/ClickHouse/issues/25364): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#25387](https://github.com/ClickHouse/ClickHouse/issues/25387): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#25455](https://github.com/ClickHouse/ClickHouse/issues/25455): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#25406](https://github.com/ClickHouse/ClickHouse/issues/25406): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25406](https://github.com/ClickHouse/ClickHouse/issues/25406): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25505](https://github.com/ClickHouse/ClickHouse/issues/25505): Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### NO CL ENTRY


@ -40,7 +40,7 @@ sidebar_label: 2022
* Backported in [#25362](https://github.com/ClickHouse/ClickHouse/issues/25362): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#25386](https://github.com/ClickHouse/ClickHouse/issues/25386): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#25456](https://github.com/ClickHouse/ClickHouse/issues/25456): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#25408](https://github.com/ClickHouse/ClickHouse/issues/25408): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25408](https://github.com/ClickHouse/ClickHouse/issues/25408): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25504](https://github.com/ClickHouse/ClickHouse/issues/25504): Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
#### NO CL ENTRY


@ -24,7 +24,7 @@ sidebar_label: 2022
* Backported in [#25363](https://github.com/ClickHouse/ClickHouse/issues/25363): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Backported in [#25388](https://github.com/ClickHouse/ClickHouse/issues/25388): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Backported in [#25448](https://github.com/ClickHouse/ClickHouse/issues/25448): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#25407](https://github.com/ClickHouse/ClickHouse/issues/25407): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Backported in [#25407](https://github.com/ClickHouse/ClickHouse/issues/25407): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
#### NOT FOR CHANGELOG / INSIGNIFICANT


@ -133,7 +133,7 @@ sidebar_label: 2022
* On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
* Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
* Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
* Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
* Fix excessive underscore before the names of the preprocessed configuration files. [#25431](https://github.com/ClickHouse/ClickHouse/pull/25431) ([Vitaly Baranov](https://github.com/vitlibar)).
* Fix conversion of datetime with timezone for MySQL, PostgreSQL, ODBC. Closes [#5057](https://github.com/ClickHouse/ClickHouse/issues/5057). [#25528](https://github.com/ClickHouse/ClickHouse/pull/25528) ([Kseniia Sumarokova](https://github.com/kssenii)).


@ -0,0 +1,49 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.3.12.75-lts (7cb5dff8019) FIXME as compared to v24.3.11.7-lts (28795d0a47e)
#### Improvement
* Backported in [#69607](https://github.com/ClickHouse/ClickHouse/issues/69607): Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69785](https://github.com/ClickHouse/ClickHouse/issues/69785): Fix attaching table when pg dbname contains "-" in MaterializedPostgreSQL. [#62730](https://github.com/ClickHouse/ClickHouse/pull/62730) ([takakawa](https://github.com/takakawa)).
* Backported in [#69461](https://github.com/ClickHouse/ClickHouse/issues/69461): Fix expiration in `RoleCache`. [#67748](https://github.com/ClickHouse/ClickHouse/pull/67748) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#68217](https://github.com/ClickHouse/ClickHouse/issues/68217): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
* Backported in [#69437](https://github.com/ClickHouse/ClickHouse/issues/69437): Fix a failure to start replication of ReplicatedMergeTree after an unexpected restart, caused by abnormal handling of a covered-by-broken part. [#68584](https://github.com/ClickHouse/ClickHouse/pull/68584) ([baolin](https://github.com/baolinhuang)).
* Backported in [#69827](https://github.com/ClickHouse/ClickHouse/issues/69827): Make `ColumnsDescription::toString` format each column using the same `IAST::FormatState` object. This results in uniform columns metadata being written to disk and ZooKeeper. [#68733](https://github.com/ClickHouse/ClickHouse/pull/68733) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Backported in [#69294](https://github.com/ClickHouse/ClickHouse/issues/69294): Fix merging of aggregated data for grouping sets. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#70470](https://github.com/ClickHouse/ClickHouse/issues/70470): Fix infinite loop after `restore replica` in the replicated merge tree with zero copy. [#69293](https://github.com/ClickHouse/ClickHouse/pull/69293) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
* Backported in [#69456](https://github.com/ClickHouse/ClickHouse/issues/69456): Fix undefined behavior when all connection attempts fail getting a connection for insertions. [#69390](https://github.com/ClickHouse/ClickHouse/pull/69390) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#69497](https://github.com/ClickHouse/ClickHouse/issues/69497): Fixed a `LOGICAL_ERROR` with function `sqidDecode` ([#69450](https://github.com/ClickHouse/ClickHouse/issues/69450)). [#69451](https://github.com/ClickHouse/ClickHouse/pull/69451) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#69724](https://github.com/ClickHouse/ClickHouse/issues/69724): Keep original order of conditions during move to prewhere. Previously the order could change and it could lead to failing queries when the order is important. [#69560](https://github.com/ClickHouse/ClickHouse/pull/69560) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#69668](https://github.com/ClickHouse/ClickHouse/issues/69668): Fix Keeper multi-request preprocessing after ZNOAUTH error. [#69627](https://github.com/ClickHouse/ClickHouse/pull/69627) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#69792](https://github.com/ClickHouse/ClickHouse/issues/69792): Make getHyperrectangleForRowGroup not throw an exception when the data type in the parquet file is not convertible into the requested data type. Solved the user's problem when the Parquet file had Decimal64 data type and the column data type was DateTime. [#69745](https://github.com/ClickHouse/ClickHouse/pull/69745) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Backported in [#70089](https://github.com/ClickHouse/ClickHouse/issues/70089): Now SQL security will work with parameterized views correctly. [#69984](https://github.com/ClickHouse/ClickHouse/pull/69984) ([pufit](https://github.com/pufit)).
* Backported in [#70077](https://github.com/ClickHouse/ClickHouse/issues/70077): Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
* Backported in [#70162](https://github.com/ClickHouse/ClickHouse/issues/70162): Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#70232](https://github.com/ClickHouse/ClickHouse/issues/70232): Check for Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70179](https://github.com/ClickHouse/ClickHouse/issues/70179): Fix data race in ColumnObject/ColumnTuple decompress method that could lead to heap use after free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70241](https://github.com/ClickHouse/ClickHouse/issues/70241): Fix the password being displayed in `system.query_log` for users with bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#70397](https://github.com/ClickHouse/ClickHouse/issues/70397): Fix crash when using WITH FILL incorrectly. [#70338](https://github.com/ClickHouse/ClickHouse/pull/70338) ([Raúl Marín](https://github.com/Algunenano)).
#### NO CL CATEGORY
* Backported in [#69526](https://github.com/ClickHouse/ClickHouse/issues/69526):. [#67029](https://github.com/ClickHouse/ClickHouse/pull/67029) ([Alexander Tokmakov](https://github.com/tavplubix)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69506](https://github.com/ClickHouse/ClickHouse/issues/69506): Better handling of errors from azure storage. [#62306](https://github.com/ClickHouse/ClickHouse/pull/62306) ([Anton Popov](https://github.com/CurtizJ)).
* Backported in [#69955](https://github.com/ClickHouse/ClickHouse/issues/69955): Output an operation error for ZK Multi request failed operation into log. [#68127](https://github.com/ClickHouse/ClickHouse/pull/68127) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#69485](https://github.com/ClickHouse/ClickHouse/issues/69485): Fix test_role & test_keeper_s3_snapshot integration tests. [#69013](https://github.com/ClickHouse/ClickHouse/pull/69013) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#70028](https://github.com/ClickHouse/ClickHouse/issues/70028): Remove stale moving parts without zookeeper. [#69075](https://github.com/ClickHouse/ClickHouse/pull/69075) ([Kirill](https://github.com/kirillgarbar)).
* Backported in [#69421](https://github.com/ClickHouse/ClickHouse/issues/69421): Fix: Not-ready Set with parallel replicas. [#69264](https://github.com/ClickHouse/ClickHouse/pull/69264) ([Igor Nikonov](https://github.com/devcrafter)).
* Backported in [#69747](https://github.com/ClickHouse/ClickHouse/issues/69747): Add function `kill_ci_runner`. Kill runner when pre-pull failed. [#69557](https://github.com/ClickHouse/ClickHouse/pull/69557) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#69636](https://github.com/ClickHouse/ClickHouse/issues/69636): Add more contexts to the debug action and use it broadly. [#69599](https://github.com/ClickHouse/ClickHouse/pull/69599) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* The test is flaky and the feature experimental. [#70269](https://github.com/ClickHouse/ClickHouse/pull/70269) ([Raúl Marín](https://github.com/Algunenano)).
* Fix test distributed inter server secret in 24.3. [#70325](https://github.com/ClickHouse/ClickHouse/pull/70325) ([Raúl Marín](https://github.com/Algunenano)).


@ -0,0 +1,73 @@
---
sidebar_position: 1
sidebar_label: 2024
---
# 2024 Changelog
### ClickHouse release v24.8.5.115-lts (8c4cb00a384) FIXME as compared to v24.8.4.13-lts (53195bc189b)
#### Improvement
* Backported in [#70046](https://github.com/ClickHouse/ClickHouse/issues/70046): Add new column readonly_duration to the system.replicas table. Needed to be able to distinguish actual readonly replicas from sentinel ones in alerts. [#69871](https://github.com/ClickHouse/ClickHouse/pull/69871) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#69786](https://github.com/ClickHouse/ClickHouse/issues/69786): Fix attaching table when pg dbname contains "-" in MaterializedPostgreSQL. [#62730](https://github.com/ClickHouse/ClickHouse/pull/62730) ([takakawa](https://github.com/takakawa)).
* Backported in [#70318](https://github.com/ClickHouse/ClickHouse/issues/70318): Fixed error on generated columns in MaterializedPostgreSQL when adnum ordering is broken [#63161](https://github.com/ClickHouse/ClickHouse/issues/63161). Fixed error on id column with nextval expression as default MaterializedPostgreSQL when there are generated columns in table. Fixed error on dropping publication with symbols except [a-z1-9-]. [#67664](https://github.com/ClickHouse/ClickHouse/pull/67664) ([Kruglov Kirill](https://github.com/1on)).
* Backported in [#69467](https://github.com/ClickHouse/ClickHouse/issues/69467): Fix expiration in `RoleCache`. [#67748](https://github.com/ClickHouse/ClickHouse/pull/67748) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#69735](https://github.com/ClickHouse/ClickHouse/issues/69735): Fix crash in `lag`/`lead` which is introduced in [#67091](https://github.com/ClickHouse/ClickHouse/issues/67091). [#68262](https://github.com/ClickHouse/ClickHouse/pull/68262) ([lgbo](https://github.com/lgbo-ustc)).
* Backported in [#69444](https://github.com/ClickHouse/ClickHouse/issues/69444): Fix a failure to start replication of ReplicatedMergeTree after an unexpected restart, caused by abnormal handling of a covered-by-broken part. [#68584](https://github.com/ClickHouse/ClickHouse/pull/68584) ([baolin](https://github.com/baolinhuang)).
* Backported in [#69810](https://github.com/ClickHouse/ClickHouse/issues/69810): Make `ColumnsDescription::toString` format each column using the same `IAST::FormatState` object. This results in uniform columns metadata being written to disk and ZooKeeper. [#68733](https://github.com/ClickHouse/ClickHouse/pull/68733) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Backported in [#69757](https://github.com/ClickHouse/ClickHouse/issues/69757): Fix incorrect results of `uniq` and GROUP BY for JSON/Dynamic types. [#69203](https://github.com/ClickHouse/ClickHouse/pull/69203) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70195](https://github.com/ClickHouse/ClickHouse/issues/70195): Fix insertion of incomplete type into Dynamic during deserialization. It could lead to `Parameter out of bound` errors. [#69291](https://github.com/ClickHouse/ClickHouse/pull/69291) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#69398](https://github.com/ClickHouse/ClickHouse/issues/69398): Mark Dynamic type as not safe primary key type to avoid issues with Fields. [#69311](https://github.com/ClickHouse/ClickHouse/pull/69311) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#69704](https://github.com/ClickHouse/ClickHouse/issues/69704): Improve restoring of access entities' dependencies. [#69346](https://github.com/ClickHouse/ClickHouse/pull/69346) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#69459](https://github.com/ClickHouse/ClickHouse/issues/69459): Fix undefined behavior when all connection attempts fail getting a connection for insertions. [#69390](https://github.com/ClickHouse/ClickHouse/pull/69390) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#69503](https://github.com/ClickHouse/ClickHouse/issues/69503): Fixed a `LOGICAL_ERROR` with function `sqidDecode` ([#69450](https://github.com/ClickHouse/ClickHouse/issues/69450)). [#69451](https://github.com/ClickHouse/ClickHouse/pull/69451) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#69480](https://github.com/ClickHouse/ClickHouse/issues/69480): Quick fix for s3queue problem on 24.6 or create query with database replicated. [#69454](https://github.com/ClickHouse/ClickHouse/pull/69454) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#69535](https://github.com/ClickHouse/ClickHouse/issues/69535): Fixed case when memory consumption was too high because of the squashing in `INSERT INTO ... SELECT` or `CREATE TABLE AS SELECT` queries. [#69469](https://github.com/ClickHouse/ClickHouse/pull/69469) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
* Backported in [#69696](https://github.com/ClickHouse/ClickHouse/issues/69696): Keep original order of conditions during move to prewhere. Previously the order could change and it could lead to failing queries when the order is important. [#69560](https://github.com/ClickHouse/ClickHouse/pull/69560) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70439](https://github.com/ClickHouse/ClickHouse/issues/70439): Fix crash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#69666](https://github.com/ClickHouse/ClickHouse/issues/69666): Fix Keeper multi-request preprocessing after ZNOAUTH error. [#69627](https://github.com/ClickHouse/ClickHouse/pull/69627) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#70191](https://github.com/ClickHouse/ClickHouse/issues/70191): Fix crash when executing `create view t as (with recursive 42 as ttt select ttt);`. [#69676](https://github.com/ClickHouse/ClickHouse/pull/69676) ([Han Fei](https://github.com/hanfei1991)).
* Backported in [#69798](https://github.com/ClickHouse/ClickHouse/issues/69798): Make getHyperrectangleForRowGroup not throw an exception when the data type in the parquet file is not convertible into the requested data type. Solved the user's problem when the Parquet file had Decimal64 data type and the column data type was DateTime. [#69745](https://github.com/ClickHouse/ClickHouse/pull/69745) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
* Backported in [#70410](https://github.com/ClickHouse/ClickHouse/issues/70410): Fixed `maxMapState` throwing 'Bad get' if value type is DateTime64. [#69787](https://github.com/ClickHouse/ClickHouse/pull/69787) ([Michael Kolupaev](https://github.com/al13n321)).
* Backported in [#70019](https://github.com/ClickHouse/ClickHouse/issues/70019): Fix analyzer default with old compatibility value. [#69895](https://github.com/ClickHouse/ClickHouse/pull/69895) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#69941](https://github.com/ClickHouse/ClickHouse/issues/69941): Don't check dependencies during CREATE OR REPLACE VIEW during DROP of old table. Previously CREATE OR REPLACE query failed when there are dependent tables of the recreated view. [#69907](https://github.com/ClickHouse/ClickHouse/pull/69907) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70001](https://github.com/ClickHouse/ClickHouse/issues/70001): Now SQL security will work with parameterized views correctly. [#69984](https://github.com/ClickHouse/ClickHouse/pull/69984) ([pufit](https://github.com/pufit)).
* Backported in [#70081](https://github.com/ClickHouse/ClickHouse/issues/70081): Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
* Backported in [#70068](https://github.com/ClickHouse/ClickHouse/issues/70068): Fixes `Block structure mismatch` for queries with nested views and `WHERE` condition. Fixes [#66209](https://github.com/ClickHouse/ClickHouse/issues/66209). [#70054](https://github.com/ClickHouse/ClickHouse/pull/70054) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#70166](https://github.com/ClickHouse/ClickHouse/issues/70166): Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#70236](https://github.com/ClickHouse/ClickHouse/issues/70236): Check for Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70203](https://github.com/ClickHouse/ClickHouse/issues/70203): Fix wrong result with skipping index. [#70127](https://github.com/ClickHouse/ClickHouse/pull/70127) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#70183](https://github.com/ClickHouse/ClickHouse/issues/70183): Fix data race in ColumnObject/ColumnTuple decompress method that could lead to heap use after free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70251](https://github.com/ClickHouse/ClickHouse/issues/70251): Fix possible hung in ALTER COLUMN with Dynamic type. [#70144](https://github.com/ClickHouse/ClickHouse/pull/70144) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70228](https://github.com/ClickHouse/ClickHouse/issues/70228): Use correct `max_types` parameter during Dynamic type creation for JSON subcolumn. [#70147](https://github.com/ClickHouse/ClickHouse/pull/70147) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70243](https://github.com/ClickHouse/ClickHouse/issues/70243): Fix the password being displayed in `system.query_log` for users with bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#70432](https://github.com/ClickHouse/ClickHouse/issues/70432): Fix possible crash in JSON column. [#70172](https://github.com/ClickHouse/ClickHouse/pull/70172) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70307](https://github.com/ClickHouse/ClickHouse/issues/70307): Fix multiple issues with arrayMin and arrayMax. [#70207](https://github.com/ClickHouse/ClickHouse/pull/70207) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#70274](https://github.com/ClickHouse/ClickHouse/issues/70274): Respect setting allow_simdjson in JSON type parser. [#70218](https://github.com/ClickHouse/ClickHouse/pull/70218) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70345](https://github.com/ClickHouse/ClickHouse/issues/70345): Don't modify global settings with startup scripts. Previously, changing a setting in a startup script would change it globally. [#70310](https://github.com/ClickHouse/ClickHouse/pull/70310) ([Antonio Andelic](https://github.com/antonio2368)).
* Backported in [#70426](https://github.com/ClickHouse/ClickHouse/issues/70426): Fix ALTER of Dynamic type with reducing max_types parameter that could lead to server crash. [#70328](https://github.com/ClickHouse/ClickHouse/pull/70328) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70371](https://github.com/ClickHouse/ClickHouse/issues/70371): Fix crash when using WITH FILL incorrectly. [#70338](https://github.com/ClickHouse/ClickHouse/pull/70338) ([Raúl Marín](https://github.com/Algunenano)).
#### NO CL ENTRY
* NO CL ENTRY: 'Revert "Backport [#70146](https://github.com/ClickHouse/ClickHouse/issues/70146) to 24.8: Upgrade integration-runner image"'. [#70324](https://github.com/ClickHouse/ClickHouse/pull/70324) ([Max K.](https://github.com/maxknv)).
#### NOT FOR CHANGELOG / INSIGNIFICANT
* Backported in [#69961](https://github.com/ClickHouse/ClickHouse/issues/69961): Output an operation error for ZK Multi request failed operation into log. [#68127](https://github.com/ClickHouse/ClickHouse/pull/68127) ([Aleksei Filatov](https://github.com/aalexfvk)).
* Backported in [#69491](https://github.com/ClickHouse/ClickHouse/issues/69491): Fix test_role & test_keeper_s3_snapshot integration tests. [#69013](https://github.com/ClickHouse/ClickHouse/pull/69013) ([Shankar](https://github.com/shiyer7474)).
* Backported in [#69953](https://github.com/ClickHouse/ClickHouse/issues/69953): Remove stale moving parts without zookeeper. [#69075](https://github.com/ClickHouse/ClickHouse/pull/69075) ([Kirill](https://github.com/kirillgarbar)).
* Backported in [#69353](https://github.com/ClickHouse/ClickHouse/issues/69353): Fix: Not-ready Set with parallel replicas. [#69264](https://github.com/ClickHouse/ClickHouse/pull/69264) ([Igor Nikonov](https://github.com/devcrafter)).
* Backported in [#69427](https://github.com/ClickHouse/ClickHouse/issues/69427): Fix 24.8 setting compatibility `rows_before_aggregation`. [#69394](https://github.com/ClickHouse/ClickHouse/pull/69394) ([Nikita Fomichev](https://github.com/fm4v)).
* Backported in [#69689](https://github.com/ClickHouse/ClickHouse/issues/69689): Add function `kill_ci_runner`. Kill runner when pre-pull failed. [#69557](https://github.com/ClickHouse/ClickHouse/pull/69557) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#69639](https://github.com/ClickHouse/ClickHouse/issues/69639): Add more contexts to the debug action and use it broadly. [#69599](https://github.com/ClickHouse/ClickHouse/pull/69599) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#69721](https://github.com/ClickHouse/ClickHouse/issues/69721): Prohibit `ALTER TABLE ... ADD INDEX ... TYPE` inverted if setting = 0. [#69684](https://github.com/ClickHouse/ClickHouse/pull/69684) ([Robert Schulze](https://github.com/rschu1ze)).
* Backported in [#69972](https://github.com/ClickHouse/ClickHouse/issues/69972): S3Queue: support having deprecated settings to not fail server startup. [#69769](https://github.com/ClickHouse/ClickHouse/pull/69769) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#70283](https://github.com/ClickHouse/ClickHouse/issues/70283): Improve pipdeptree generator for docker images: update requirements.txt for the integration tests runner container; remove some small dependencies and improve `helpers/retry_decorator.py`; upgrade docker-compose from EOL version 1 to version 2. [#70146](https://github.com/ClickHouse/ClickHouse/pull/70146) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#70260](https://github.com/ClickHouse/ClickHouse/issues/70260): Update test_storage_s3_queue/test.py. [#70159](https://github.com/ClickHouse/ClickHouse/pull/70159) ([Kseniia Sumarokova](https://github.com/kssenii)).
* Backported in [#70314](https://github.com/ClickHouse/ClickHouse/issues/70314): CI: Remove await feature from release branches. [#70294](https://github.com/ClickHouse/ClickHouse/pull/70294) ([Max K.](https://github.com/maxknv)).
* Backported in [#70380](https://github.com/ClickHouse/ClickHouse/issues/70380): Fix tiny mistake, responsible for some of kafka test flaps. Example [report](https://s3.amazonaws.com/clickhouse-test-reports/0/3198aafac59c368993e7b5f49d95674cc1b1be18/integration_tests__release__[2_4].html). [#70352](https://github.com/ClickHouse/ClickHouse/pull/70352) ([filimonov](https://github.com/filimonov)).
* Backported in [#70405](https://github.com/ClickHouse/ClickHouse/issues/70405): Closes [#69634](https://github.com/ClickHouse/ClickHouse/issues/69634). [#70354](https://github.com/ClickHouse/ClickHouse/pull/70354) ([pufit](https://github.com/pufit)).


@ -82,7 +82,7 @@ cd ./utils/check-style
# Check duplicate includes
./check-duplicate-includes.sh
# Check c++ formatiing
# Check c++ formatting
./check-style
# Check python formatting with black


@ -63,7 +63,34 @@ Currently there are 3 ways to authenticate:
- `SAS Token` - Can be used by providing an `endpoint`, `connection_string` or `storage_account_url`. It is identified by the presence of '?' in the URL.
- `Workload Identity` - Can be used by providing an `endpoint` or `storage_account_url`. If `use_workload_identity` parameter is set in config, ([workload identity](https://github.com/Azure/azure-sdk-for-cpp/tree/main/sdk/identity/azure-identity#authenticate-azure-hosted-applications)) is used for authentication.
### Data cache {#data-cache}
`Azure` table engine supports data caching on local disk.
See filesystem cache configuration options and usage in this [section](/docs/en/operations/storing-data.md/#using-local-cache).
Caching is done based on the path and ETag of the storage object, so ClickHouse will not read a stale cache version.
To enable caching, use the settings `filesystem_cache_name = '<name>'` and `enable_filesystem_cache = 1`.
```sql
SELECT *
FROM azureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;', 'test_container', 'test_table', 'CSV')
SETTINGS filesystem_cache_name = 'cache_for_azure', enable_filesystem_cache = 1;
```
There are two ways to define a cache in the configuration file.
1. Add the following section to the ClickHouse configuration file:
``` xml
<clickhouse>
<filesystem_caches>
<cache_for_azure>
<path>path to cache directory</path>
<max_size>10Gi</max_size>
</cache_for_azure>
</filesystem_caches>
</clickhouse>
```
2. Reuse cache configuration (and therefore cache storage) from the ClickHouse `storage_configuration` section, [described here](/docs/en/operations/storing-data.md/#using-local-cache)
## See also


@ -48,6 +48,10 @@ Using named collections:
CREATE TABLE deltalake ENGINE=DeltaLake(deltalake_conf, filename = 'test_table')
```
### Data cache {#data-cache}
`DeltaLake` table engine and table function support the same data caching as the `S3`, `AzureBlobStorage` and `HDFS` storages. See [here](../../../engines/table-engines/integrations/s3.md#data-cache).
## See also
- [deltaLake table function](../../../sql-reference/table-functions/deltalake.md)


@ -6,7 +6,7 @@ sidebar_label: Iceberg
# Iceberg Table Engine
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure and locally stored tables.
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure, HDFS and locally stored tables.
## Create Table
@ -19,13 +19,16 @@ CREATE TABLE iceberg_table_s3
CREATE TABLE iceberg_table_azure
ENGINE = IcebergAzure(connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression])
CREATE TABLE iceberg_table_hdfs
ENGINE = IcebergHDFS(path_to_table, [,format] [,compression_method])
CREATE TABLE iceberg_table_local
ENGINE = IcebergLocal(path_to_table, [,format] [,compression_method])
```
**Engine arguments**
Description of the arguments coincides with description of arguments in engines `S3`, `AzureBlobStorage` and `File` correspondingly.
Description of the arguments coincides with description of arguments in engines `S3`, `AzureBlobStorage`, `HDFS` and `File` correspondingly.
`format` stands for the format of data files in the Iceberg table.
Engine parameters can be specified using [Named Collections](../../../operations/named-collections.md)
@ -60,6 +63,10 @@ CREATE TABLE iceberg_table ENGINE=IcebergS3(iceberg_conf, filename = 'test_table
Table engine `Iceberg` is an alias to `IcebergS3` now.
### Data cache {#data-cache}
`Iceberg` table engine and table function support the same data caching as the `S3`, `AzureBlobStorage` and `HDFS` storages. See [here](../../../engines/table-engines/integrations/s3.md#data-cache).
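As a sketch, assuming a cache named `cache_for_s3` is defined in the server configuration as in the `S3` example, enabling it for an Iceberg query could look like this (the table name is illustrative):
```sql
-- assumes `cache_for_s3` is declared under <filesystem_caches> in the server config
SELECT *
FROM iceberg_table_s3
SETTINGS filesystem_cache_name = 'cache_for_s3', enable_filesystem_cache = 1;
```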
## See also
- [iceberg table function](/docs/en/sql-reference/table-functions/iceberg.md)


@ -4,12 +4,8 @@ sidebar_position: 138
sidebar_label: MySQL
---
import CloudAvailableBadge from '@theme/badges/CloudAvailableBadge';
# MySQL Table Engine
<CloudAvailableBadge />
The MySQL engine allows you to perform `SELECT` and `INSERT` queries on data that is stored on a remote MySQL server.
## Creating a Table {#creating-a-table}


@ -26,6 +26,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
│ two │ 2 │
└──────┴───────┘
```
## Create Table {#creating-a-table}
``` sql
@ -43,6 +44,37 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are used from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will auto-detect compression by file extension.
### Data cache {#data-cache}
`S3` table engine supports data caching on local disk.
See filesystem cache configuration options and usage in this [section](/docs/en/operations/storing-data.md/#using-local-cache).
Caching is done based on the path and ETag of the storage object, so ClickHouse will not read a stale cache version.
To enable caching, use the settings `filesystem_cache_name = '<name>'` and `enable_filesystem_cache = 1`.
```sql
SELECT *
FROM s3('http://minio:10000/clickhouse//test_3.csv', 'minioadmin', 'minioadminpassword', 'CSV')
SETTINGS filesystem_cache_name = 'cache_for_s3', enable_filesystem_cache = 1;
```
There are two ways to define a cache in the configuration file.
1. Add the following section to the ClickHouse configuration file:
``` xml
<clickhouse>
<filesystem_caches>
<cache_for_s3>
<path>path to cache directory</path>
<max_size>10Gi</max_size>
</cache_for_s3>
</filesystem_caches>
</clickhouse>
```
2. Reuse cache configuration (and therefore cache storage) from the ClickHouse `storage_configuration` section, [described here](/docs/en/operations/storing-data.md/#using-local-cache)
### PARTITION BY
`PARTITION BY` — Optional. In most cases you don't need a partition key, and if it is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use too granular partitioning. Don't partition your data by client identifiers or names (instead, make client identifier or name the first column in the ORDER BY expression).
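If a partition key is needed, a minimal sketch is shown below; it reuses the illustrative minio endpoint and credentials from the example above, and assumes `{_partition_id}` in the object key is substituted with the partition value on insert:
```sql
-- illustrative endpoint/credentials; {_partition_id} is replaced per partition on insert
CREATE TABLE s3_partitioned (name String, value UInt32, d Date)
    ENGINE = S3('http://minio:10000/clickhouse/test_{_partition_id}.csv', 'minioadmin', 'minioadminpassword', 'CSV')
    PARTITION BY toYYYYMM(d);
```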


@ -374,15 +374,15 @@ Users can create [UDF](/docs/en/sql-reference/statements/create/function.md) to
```sql
CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_nubmer_of_all_grams) * log(2));
(total_number_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_number_of_all_grams) * log(2));
CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, probability_of_false_positives) -> ceil((total_nubmer_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2))));
(total_number_of_all_grams, probability_of_false_positives) -> ceil((total_number_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2))));
CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_nubmer_of_all_grams)), number_of_hash_functions);
(total_number_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_number_of_all_grams)), number_of_hash_functions);
CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster]
AS


@ -35,7 +35,7 @@ Engine parameters:
- `root_path` - ZooKeeper path where the `table_name` will be stored.
This path should not contain the prefix defined by `<keeper_map_path_prefix>` config because the prefix will be automatically appended to the `root_path`.
Additionally, format of `auxiliary_zookeeper_cluster_name:/some/path` is also supported where `auxiliary_zookeeper_cluster` is a ZooKeeper cluster defined inside `<auxiliary_zookeepers>` config.
By default, ZooKeeper cluster defined inside `<zookeeper>` config is used.
- `keys_limit` - number of keys allowed inside the table.
This is a soft limit, and in some edge cases more keys may end up in the table.
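A minimal illustrative example (the table name and path are assumptions, not from the original text):
```sql
CREATE TABLE keeper_map_table
(
    key String,
    value UInt64
)
ENGINE = KeeperMap('/keeper_map_table', 4)  -- root_path and keys_limit
PRIMARY KEY key;
```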

You can pass parameters to `clickhouse-client` (all parameters have a default value):
- `--print-profile-events` Print `ProfileEvents` packets.
- `--profile-events-delay-ms` Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet).
- `--jwt` If specified, enables authorization via JSON Web Token. Server JWT authorization is available only in ClickHouse Cloud.
- `--progress` Print progress of query execution. Possible values: 'tty|on|1|true|yes' - outputs to TTY in interactive mode; 'err' - outputs to STDERR in non-interactive mode; 'off|0|false|no' - disables the progress printing. Default: TTY in interactive mode, disabled in non-interactive mode.
- `--progress-table` Print a progress table with changing metrics during query execution. Possible values: 'tty|on|1|true|yes' - outputs to TTY in interactive mode; 'err' - outputs to STDERR in non-interactive mode; 'off|0|false|no' - disables the progress table. Default: TTY in interactive mode, disabled in non-interactive mode.
- `--enable-progress-table-toggle` Enable toggling of the progress table by pressing the control key (Space). Only applicable in interactive mode with the progress table printing enabled. Default: 'true'.
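For example, a hypothetical invocation that streams progress to STDERR while the query result goes to a file (the query is illustrative):
```bash
clickhouse-client --progress=err --progress-table=off \
    --query "SELECT count() FROM system.numbers LIMIT 100000000" > result.txt
```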
Instead of `--host`, `--port`, `--user` and `--password` options, ClickHouse client also supports connection strings (see next section).

```sql
INSERT INTO json_as_object (json) FORMAT JSONAsObject {"any json stucture":1}
SELECT time, json FROM json_as_object FORMAT JSONEachRow
```
```response
{"time":"2024-09-16 12:18:10","json":{}}
{"time":"2024-09-16 12:18:13","json":{"any json stucture":"1"}}
{"time":"2024-09-16 12:18:08","json":{"foo":{"bar":{"x":"y"},"baz":"1"}}}
```

If setting [input_format_with_names_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_names_use_header) is set to 1,
the columns from input data will be mapped to the columns from the table by their names.
Otherwise, the first row will be skipped.
If setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
the types from input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
## RowBinaryWithDefaults {#rowbinarywithdefaults}
For column `y` data starts with byte `00` that indicates that the column has an actual value, which is read from the subsequent data.
## RowBinary format settings {#row-binary-format-settings}
- [format_binary_max_string_size](/docs/en/operations/settings/settings-formats.md/#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.
- [format_binary_max_string_size](/docs/en/operations/settings/settings-formats.md/#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.
- [output_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#output_format_binary_encode_types_in_binary_format) - Allows writing types in the header using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes output format. Default value - `false`.
- [input_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#input_format_binary_encode_types_in_binary_format) - Allows reading types in the header using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in RowBinaryWithNamesAndTypes input format. Default value - `false`.
- [output_format_binary_write_json_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_binary_write_json_as_string) - Allows writing values of [JSON](/docs/en/sql-reference/data-types/newjson.md) data type as JSON [String](/docs/en/sql-reference/data-types/string.md) values in RowBinary output format. Default value - `false`.
- [input_format_binary_read_json_as_string](/docs/en/operations/settings/settings-formats.md/#input_format_binary_read_json_as_string) - Allows reading values of [JSON](/docs/en/sql-reference/data-types/newjson.md) data type as JSON [String](/docs/en/sql-reference/data-types/string.md) values in RowBinary input format. Default value - `false`.
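A short sketch of using one of these settings (the query is illustrative):
```sql
SELECT number, toString(number) AS s
FROM numbers(3)
SETTINGS output_format_binary_encode_types_in_binary_format = 1
FORMAT RowBinaryWithNamesAndTypes;
```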
## Values {#data-format-values}

```sql
DESC format(JSONEachRow, $$
{"value" : "424242424242"}
$$)
```
```response
┌─name──┬─type────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ value │ Nullable(Int64) │ │ │ │ │ │
└───────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
This setting is disabled by default.
```sql
SET input_format_json_try_infer_numbers_from_strings = 1;
DESC format(CSV, '"42","42.42"');
DESC format(CSV, '42,42.42');
```
```response
┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ c1 │ Nullable(Int64) │ │ │ │ │ │
│ c2 │ Nullable(Float64) │ │ │ │ │ │

```

Features:
- Performance Optimizations: Utilizes Indexed DB for efficient caching and state management.
- Local Data Storage: All data is stored locally in the browser, ensuring no data is sent anywhere else.
### ChartDB {#chartdb}
[ChartDB](https://chartdb.io) is a free and open-source tool for visualizing and designing database schemas, including ClickHouse, with a single query. Built with React, it provides a seamless and user-friendly experience, requiring no database credentials or signup to get started.
Features:
- Schema Visualization: Instantly import and visualize your ClickHouse schema, including ER diagrams with materialized views and standard views, showing references to tables.
- AI-Powered DDL Export: Generate DDL scripts effortlessly for better schema management and documentation.
- Multi-SQL Dialect Support: Compatible with a range of SQL dialects, making it versatile for various database environments.
- No Signup or Credentials Needed: All functionality is accessible directly in the browser, keeping it frictionless and secure.
[ChartDB Source Code](https://github.com/chartdb/chartdb).
### Tabix {#tabix}
Web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) project.

The table must be enabled in the server configuration, see the `opentelemetry_span_log` element in the default config file `config.xml`.
The tags or attributes are saved as two parallel arrays, containing the keys and values. Use [ARRAY JOIN](../sql-reference/statements/select/array-join.md) to work with them.
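A sketch of such a query, assuming the parallel-array columns are exposed as `attribute.names` and `attribute.values` (these column names are an assumption based on the description above):
```sql
SELECT
    operation_name,
    attr_name,
    attr_value
FROM system.opentelemetry_span_log
ARRAY JOIN
    attribute.names AS attr_name,
    attribute.values AS attr_value
LIMIT 10;
```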
## Log-query-settings
ClickHouse allows you to log changes to query settings during query execution. When enabled, any modifications made to query settings will be recorded in the OpenTelemetry span log. This feature is particularly useful in production environments for tracking configuration changes that may affect query performance.
## Integration with monitoring systems
At the moment, there is no ready-made tool that can export the tracing data from ClickHouse to a monitoring system.

## deduplicate_merge_projection_mode
Whether to allow creating projections for tables with a non-classic MergeTree engine, that is, not a (Replicated, Shared) MergeTree. The `ignore` option exists purely for compatibility and may result in incorrect answers. If projections are otherwise allowed, this setting defines the action taken when parts with projections are merged: either drop or rebuild them. Classic MergeTree ignores this setting.
It also controls `OPTIMIZE DEDUPLICATE`, but affects all MergeTree family members. Like the option `lightweight_mutation_projection_mode`, it applies at the part level.
Possible values:
- ignore, throw, drop, rebuild
Default value: throw
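A minimal sketch of setting this for a non-classic MergeTree table (the table and column names are illustrative):
```sql
CREATE TABLE events_dedup
(
    id UInt64,
    ts DateTime
)
ENGINE = ReplacingMergeTree
ORDER BY id
SETTINGS deduplicate_merge_projection_mode = 'rebuild';
```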

See also the description of [max_memory_usage](#settings_max_memory_usage).
For example, if you want to set `max_memory_usage_for_user` to 1000 bytes for a user named `clickhouse_read`, you can use the statement:
``` sql
ALTER USER clickhouse_read SETTINGS max_memory_usage_for_user = 1000;
```
You can verify it worked by logging out of your client, logging back in, and then using the `getSetting` function:
```sql
SELECT getSetting('max_memory_usage_for_user');
```
## max_rows_to_read {#max-rows-to-read}
The following restrictions can be checked on each block (instead of on each row), so the restrictions can be exceeded slightly.

# Users and Roles Settings
The `users` section of the `users.xml` configuration file contains user settings.
:::note
ClickHouse also supports [SQL-driven workflow](../../guides/sre/user-management/index.md#access-control) for managing users. We recommend using it.

:::

Columns:
- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.
- `view` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Table uuid (Atomic database).
- `status` ([String](../../sql-reference/data-types/string.md)) — Current state of the refresh.
- `last_success_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time when the latest successful refresh started. NULL if no successful refreshes happened since server startup or table creation.
- `last_success_duration_ms` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — How long the latest refresh took.
- `last_refresh_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time when the latest refresh attempt finished (if known) or started (if unknown or still running). NULL if no refresh attempts happened since server startup or table creation.
- `last_refresh_replica` ([String](../../sql-reference/data-types/string.md)) — If coordination is enabled, name of the replica that made the current (if running) or previous (if not running) refresh attempt.
- `next_refresh_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time at which the next refresh is scheduled to start, if status = Scheduled.
- `exception` ([String](../../sql-reference/data-types/string.md)) — Error message from previous attempt if it failed.
- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How many failed attempts there were so far, for the current refresh.
- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1. Not available if status is `RunningOnAnotherReplica`.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far. Not available if status is `RunningOnAnotherReplica`.
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of bytes read during the current refresh. Not available if status is `RunningOnAnotherReplica`.
- `total_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Estimated total number of rows that need to be read by the current refresh. Not available if status is `RunningOnAnotherReplica`.
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows written during the current refresh. Not available if status is `RunningOnAnotherReplica`.
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of bytes written during the current refresh. Not available if status is `RunningOnAnotherReplica`.
## -Distinct
Every unique combination of arguments will be aggregated only once. Repeating values are ignored.
Examples: `sum(DISTINCT x)` (or `sumDistinct(x)`), `groupArray(DISTINCT x)` (or `groupArrayDistinct(x)`), `corrStable(DISTINCT x, y)` (or `corrStableDistinct(x, y)`) and so on.
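For instance, the following two forms are equivalent; only the distinct values 0, 1 and 2 of `number % 3` are aggregated:
```sql
SELECT sum(DISTINCT number % 3) AS a, sumDistinct(number % 3) AS b
FROM numbers(10); -- both return 3
```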
## -OrDefault

The table below describes how different interval kinds of the `Interval` data type are encoded.
### Aggregate function parameter binary encoding
The table below describes how parameters of `AggregateFunction` and `SimpleAggregateFunction` are encoded.
The encoding of a parameter consists of 1 byte indicating the type of the parameter and the value itself.
| Parameter type | Binary encoding |
|----------------|-----------------|
| `String` | `0x0C<var_uint_size><data>` |
| `Array` | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Tuple` | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Map` | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N>` |
| `IPv4` | `0x10<uint32_little_endian_value>` |
| `IPv6` | `0x11<uint128_little_endian_value>` |
| `UUID` | `0x12<uuid_value>` |

## Using Dynamic type in functions
Most functions support arguments of type `Dynamic`. In this case, the function is executed separately on each internal data type stored inside the `Dynamic` column.
When the result type of the function depends on the argument types, the result of such a function executed with `Dynamic` arguments will be `Dynamic`. When the result type of the function doesn't depend on the argument types, the result will be `Nullable(T)`, where `T` is the usual result type of this function.
Examples:
```sql
CREATE TABLE test (d Dynamic) ENGINE=Memory;
INSERT INTO test VALUES (NULL), (1::Int8), (2::Int16), (3::Int32), (4::Int64);
```
```sql
SELECT d, dynamicType(d) FROM test;
```
```text
┌─d────┬─dynamicType(d)─┐
│ ᴺᵁᴸᴸ │ None │
│ 1 │ Int8 │
│ 2 │ Int16 │
│ 3 │ Int32 │
│ 4 │ Int64 │
└──────┴────────────────┘
```
```sql
SELECT d, d + 1 AS res, toTypeName(res), dynamicType(res) FROM test;
```
```text
┌─d────┬─res──┬─toTypeName(res)─┬─dynamicType(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Dynamic │ None │
│ 1 │ 2 │ Dynamic │ Int16 │
│ 2 │ 3 │ Dynamic │ Int32 │
│ 3 │ 4 │ Dynamic │ Int64 │
│ 4 │ 5 │ Dynamic │ Int64 │
└──────┴──────┴─────────────────┴──────────────────┘
```
```sql
SELECT d, d + d AS res, toTypeName(res), dynamicType(res) FROM test;
```
```text
┌─d────┬─res──┬─toTypeName(res)─┬─dynamicType(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Dynamic │ None │
│ 1 │ 2 │ Dynamic │ Int16 │
│ 2 │ 4 │ Dynamic │ Int32 │
│ 3 │ 6 │ Dynamic │ Int64 │
│ 4 │ 8 │ Dynamic │ Int64 │
└──────┴──────┴─────────────────┴──────────────────┘
```
```sql
SELECT d, d < 3 AS res, toTypeName(res) FROM test;
```
```text
┌─d────┬──res─┬─toTypeName(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(UInt8) │
│ 1 │ 1 │ Nullable(UInt8) │
│ 2 │ 1 │ Nullable(UInt8) │
│ 3 │ 0 │ Nullable(UInt8) │
│ 4 │ 0 │ Nullable(UInt8) │
└──────┴──────┴─────────────────┘
```
```sql
SELECT d, exp2(d) AS res, toTypeName(res) FROM test;
```
```text
┌─d────┬──res─┬─toTypeName(res)───┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(Float64) │
│ 1 │ 2 │ Nullable(Float64) │
│ 2 │ 4 │ Nullable(Float64) │
│ 3 │ 8 │ Nullable(Float64) │
│ 4 │ 16 │ Nullable(Float64) │
└──────┴──────┴───────────────────┘
```
```sql
TRUNCATE TABLE test;
INSERT INTO test VALUES (NULL), ('str_1'), ('str_2');
SELECT d, dynamicType(d) FROM test;
```
```text
┌─d─────┬─dynamicType(d)─┐
│ ᴺᵁᴸᴸ │ None │
│ str_1 │ String │
│ str_2 │ String │
└───────┴────────────────┘
```
```sql
SELECT d, upper(d) AS res, toTypeName(res) FROM test;
```
```text
┌─d─────┬─res───┬─toTypeName(res)──┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(String) │
│ str_1 │ STR_1 │ Nullable(String) │
│ str_2 │ STR_2 │ Nullable(String) │
└───────┴───────┴──────────────────┘
```
```sql
SELECT d, extract(d, '([0-3])') AS res, toTypeName(res) FROM test;
```
```text
┌─d─────┬─res──┬─toTypeName(res)──┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(String) │
│ str_1 │ 1 │ Nullable(String) │
│ str_2 │ 2 │ Nullable(String) │
└───────┴──────┴──────────────────┘
```
```sql
TRUNCATE TABLE test;
INSERT INTO test VALUES (NULL), ([1, 2]), ([3, 4]);
SELECT d, dynamicType(d) FROM test;
```
```text
┌─d─────┬─dynamicType(d)─┐
│ ᴺᵁᴸᴸ │ None │
│ [1,2] │ Array(Int64) │
│ [3,4] │ Array(Int64) │
└───────┴────────────────┘
```
```sql
SELECT d, d[1] AS res, toTypeName(res), dynamicType(res) FROM test;
```
```text
┌─d─────┬─res──┬─toTypeName(res)─┬─dynamicType(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Dynamic │ None │
│ [1,2] │ 1 │ Dynamic │ Int64 │
│ [3,4] │ 3 │ Dynamic │ Int64 │
└───────┴──────┴─────────────────┴──────────────────┘
```
If a function cannot be executed on some type inside a `Dynamic` column, an exception will be thrown:
```sql
INSERT INTO test VALUES (42), (43), ('str_1');
SELECT d, dynamicType(d) FROM test;
```
```text
┌─d─────┬─dynamicType(d)─┐
│ 42 │ Int64 │
│ 43 │ Int64 │
│ str_1 │ String │
└───────┴────────────────┘
┌─d─────┬─dynamicType(d)─┐
│ ᴺᵁᴸᴸ │ None │
│ [1,2] │ Array(Int64) │
│ [3,4] │ Array(Int64) │
└───────┴────────────────┘
```
```sql
SELECT d, d + 1 AS res, toTypeName(res), dynamicType(d) FROM test;
```
```text
Received exception:
Code: 43. DB::Exception: Illegal types Array(Int64) and UInt8 of arguments of function plus: while executing 'FUNCTION plus(__table1.d : 3, 1_UInt8 :: 1) -> plus(__table1.d, 1_UInt8) Dynamic : 0'. (ILLEGAL_TYPE_OF_ARGUMENT)
```
We can filter out unneeded types:
```sql
SELECT d, d + 1 AS res, toTypeName(res), dynamicType(res) FROM test WHERE dynamicType(d) NOT IN ('String', 'Array(Int64)', 'None')
```
```text
┌─d──┬─res─┬─toTypeName(res)─┬─dynamicType(res)─┐
│ 42 │ 43 │ Dynamic │ Int64 │
│ 43 │ 44 │ Dynamic │ Int64 │
└────┴─────┴─────────────────┴──────────────────┘
```
Or extract the required type as a subcolumn:
```sql
SELECT d, d.Int64 + 1 AS res, toTypeName(res) FROM test;
```
```text
┌─d─────┬──res─┬─toTypeName(res)─┐
│ 42 │ 43 │ Nullable(Int64) │
│ 43 │ 44 │ Nullable(Int64) │
│ str_1 │ ᴺᵁᴸᴸ │ Nullable(Int64) │
└───────┴──────┴─────────────────┘
┌─d─────┬──res─┬─toTypeName(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(Int64) │
│ [1,2] │ ᴺᵁᴸᴸ │ Nullable(Int64) │
│ [3,4] │ ᴺᵁᴸᴸ │ Nullable(Int64) │
└───────┴──────┴─────────────────┘
```
## Using Dynamic type in ORDER BY and GROUP BY
During `ORDER BY` and `GROUP BY`, values of `Dynamic` type are compared similarly to values of the `Variant` type:
The result of operator `<` for values `d1` with underlying type `T1` and `d2` with underlying type `T2` of a type `Dynamic` is defined as follows:
- If `T1 = T2 = T`, the result will be `d1.T < d2.T` (underlying values will be compared).
- If `T1 != T2`, the result will be `T1 < T2` (type names will be compared).
Examples:
```sql
CREATE TABLE test (d Dynamic) ENGINE=Memory;
INSERT INTO test VALUES (42), (43), ('abc'), ('abd'), ([1, 2, 3]), ([]), (NULL);
```
```sql
SELECT d, dynamicType(d) FROM test;
```
```text
┌─d───────┬─dynamicType(d)─┐
│ 42 │ Int64 │
│ 43 │ Int64 │
│ abc │ String │
│ abd │ String │
│ [1,2,3] │ Array(Int64) │
│ [] │ Array(Int64) │
│ ᴺᵁᴸᴸ │ None │
└─────────┴────────────────┘
```
```sql
SELECT d, dynamicType(d) FROM test ORDER BY d;
```
```text
┌─d───────┬─dynamicType(d)─┐
│ [] │ Array(Int64) │
│ [1,2,3] │ Array(Int64) │
│ 42 │ Int64 │
│ 43 │ Int64 │
│ abc │ String │
│ abd │ String │
│ ᴺᵁᴸᴸ │ None │
└─────────┴────────────────┘
```
**Note:** values of `Dynamic` type with different numeric types are considered different values and are not compared with each other; their type names are compared instead.
```sql
SELECT d, dynamicType(d) FROM test GROUP BY d;
```
```text
┌─d───┬─dynamicType(d)─┐
│ 1 │ Int64 │
│ 100 │ UInt32 │
│ 1 │ UInt32 │
│ 100 │ Int64 │
└─────┴────────────────┘
```
**Note**: the comparison rule described above is not applied during execution of comparison functions like `<`/`>`/`=` and others, because of the [special handling](#using-dynamic-type-in-functions) of functions with the `Dynamic` type.
## Reaching the limit in number of different data types stored inside Dynamic
The `Dynamic` data type can store only a limited number of different data types as separate subcolumns. By default, this limit is 32, but you can change it in the type declaration using the syntax `Dynamic(max_types=N)`, where N is between 0 and 254 (due to implementation details, it's impossible to have more than 254 different data types stored as separate subcolumns inside `Dynamic`).
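For example (an illustrative declaration), the following allows at most 4 types to be stored as separate subcolumns:
```sql
CREATE TABLE test_dynamic (d Dynamic(max_types=4)) ENGINE = Memory;
```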

## bitTestAll
Returns result of [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. Counting is right-to-left, starting at 0.
The conjunction for bit-wise operations:
0 AND 0 = 0
**Syntax**

```sql
SELECT bitTestAll(number, index1, index2, index3, index4, ...)
```
**Returned value**
- Result of the logical conjunction. [UInt8](../data-types/int-uint.md).
**Example**

## toISOYear
Converts a date, or date with time, to the ISO year as a UInt16 number.
**Syntax**
```sql
toISOYear(value)
```
**Arguments**
- `value` — The value with date or date with time. [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md).
**Returned value**
- The input value converted to an ISO year number. [UInt16](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toISOYear(toDate('2024/10/02')) as year1,
toISOYear(toDateTime('2024-10-02 01:30:00')) as year2
```
## toISOWeek
Converts a date, or date with time, to a UInt8 number containing the ISO Week number.
**Syntax**
```sql
toISOWeek(value)
```
**Arguments**
- `value` — The value with date or date with time.
**Returned value**
- The input value converted to an ISO week number. [UInt8](../data-types/int-uint.md).
**Example**
Query:
```sql
SELECT
toISOWeek(toDate('2024/10/02')) AS week1,
toISOWeek(toDateTime('2024/10/02 01:30:00')) AS week2
```
Response:
```response
┌─week1─┬─week2─┐
│ 40 │ 40 │
└───────┴───────┘
```
## toWeek
This function returns the week number for date or datetime. The two-argument form of `toWeek()` enables you to specify whether the week starts on Sunday or Monday and whether the return value should be in the range from 0 to 53 or from 1 to 53. If the mode argument is omitted, the default mode is 0.

## toIPv4OrDefault

**Syntax**

```sql
toIPv4OrDefault(value)
```
**Arguments**
- `value`The value with IPv4 address.
- `value`A string-encoded IPv4 address. [String](../data-types/string.md)
**Returned value**
- `value` converted to an IPv4 address. [IPv4](../data-types/ipv4.md).
**Example**
## toIPv4OrNull

**Syntax**

```sql
toIPv4OrNull(value)
```
**Arguments**
- `value`The value with IPv4 address.
- `value`A string-encoded IPv4 address. [String](../data-types/string.md)
**Returned value**
- `value` converted to an IPv4 address. [IPv4](../data-types/ipv4.md).
**Example**

## toTypeName
Returns the type name of the passed argument.
If `NULL` is passed, the function returns type `Nullable(Nothing)`, which corresponds to ClickHouse's internal `NULL` representation.
**Syntax**
```sql
toTypeName(value)
```
**Arguments**
- `value`The value with any arbitrary.
- `value`A value of arbitrary type.
**Returned value**
- The data type name of the input value. [String](../data-types/string.md).
**Example**
## ignore
Accepts arbitrary arguments and unconditionally returns `0`.
The argument is still evaluated internally, making it useful e.g. for benchmarking.
**Syntax**
```sql
ignore([arg1[, arg2[, ...]]])
```
**Arguments**
- Accepts arbitrarily many arguments of arbitrary type, including `NULL`.
**Returned value**
- Returns `0`.
**Example**
Query:
```sql
SELECT ignore(0, 'ClickHouse', NULL);
```
Result:
```response
┌─ignore(0, 'ClickHouse', NULL)─┐
│ 0 │
└───────────────────────────────┘
```
## sleep
## currentDatabase

Useful in table engine parameters of `CREATE TABLE` queries where you need to specify the database.

**Syntax**

```sql
currentDatabase()
```
**Arguments**
None.
**Returned value**
- Returns the current database name. [String](../data-types/string.md).
## currentSchemas
Returns a single-element array with the name of the current database schema.
**Syntax**
```sql
currentSchemas(bool)
```
Alias: `current_schemas`.
**Arguments**
- `bool`: A boolean value. [Bool](../data-types/boolean.md).
:::note
The boolean argument is ignored. It only exists for the sake of compatibility with the [implementation](https://www.postgresql.org/docs/7.3/functions-misc.html) of this function in PostgreSQL.
:::
**Returned values**
- Returns a single-element array with the name of the current database.
**Example**
```sql
SELECT currentSchemas(true);
```
Result:
```response
['default']
```
## isConstant
Returns whether the argument is a constant expression.
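A short illustrative query:
```sql
SELECT isConstant(3 + 5) AS const_expr, isConstant(number) AS column_expr
FROM numbers(1);
-- const_expr = 1 (a constant expression), column_expr = 0 (depends on a column)
```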
## toColumnTypeName

```sql
toColumnTypeName(value)
```
**Example**
Difference between `toTypeName` and `toColumnTypeName`:
```sql
SELECT toTypeName(CAST('2018-01-01 01:02:03' AS DateTime))
```

## connectionId

Retrieves the connection ID of the client that submitted the current query and returns it as a UInt64 integer.

**Syntax**

```sql
connectionId()
```
Alias: `connection_id`.
**Parameters**
None.
**Returned value**
The current connection ID. [UInt64](../data-types/int-uint.md).
**Implementation details**
```sql
SELECT connectionId();
```

```response
0
```
## getClientHTTPHeader
Get the value of an HTTP header.

## match {#match}
Returns whether string `haystack` matches the regular expression `pattern` in [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax).
Matching is based on UTF-8, e.g. `.` matches the Unicode code point `¥` which is represented in UTF-8 using two bytes. The regular
expression must not contain null bytes. If the haystack or the pattern are not valid UTF-8, then the behavior is undefined.
## extract
Returns the first match of a regular expression in a string.
If `haystack` does not match the `pattern` regex, an empty string is returned.

If the regular expression has capturing groups, the function matches the input string against the first capturing group.
**Syntax**
```sql
extract(haystack, pattern)
```
**Arguments**
- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax).
**Returned value**
- The first match of the regular expression in the haystack string. [String](../data-types/string.md).
**Example**
Query:
```sql
SELECT extract('number: 1, number: 2, number: 3', '\\d+') AS result;
```
Result:
```response
┌─result─┐
│ 1 │
└────────┘
```
## extractAll
Returns an array of all matches of a regular expression in a string. If `haystack` does not match the `pattern` regex, an empty string is returned.

Returns an array of strings consisting of all matches of the regex.

The behavior with respect to sub-patterns is the same as in function [`extract`](#extract).
**Syntax**
```sql
extractAll(haystack, pattern)
```
**Arguments**
- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax).
**Returned value**
- Array of matches of the regular expression in the haystack string. [Array](../data-types/array.md)([String](../data-types/string.md)).
**Example**
Query:
```sql
SELECT extractAll('number: 1, number: 2, number: 3', '\\d+') AS result;
```
Result:
```response
┌─result────────┐
│ ['1','2','3'] │
└───────────────┘
```
## extractAllGroupsHorizontal
Matches all groups of the `haystack` string using the `pattern` regular expression. Returns an array of arrays, where the first array includes all fragments matching the first group, the second array - matching the second group, etc.
**Syntax**

```sql
extractAllGroupsHorizontal(haystack, pattern)
```
**Arguments**
- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. [String](../data-types/string.md).
**Returned value**
## extractGroups
Matches all groups of a given input string with a given regular expression and returns an array of arrays of matches.
**Syntax**
``` sql
extractGroups(haystack, pattern)
```
**Arguments**
- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. [String](../data-types/string.md).
**Returned value**
- Array of arrays of matches. [Array](../data-types/array.md).
**Example**
``` sql
SELECT extractGroups('hello abc=111 world', '("[^"]+"|\\w+)=("[^"]+"|\\w+)') AS result;
```
Result:
``` text
┌─result────────┐
│ ['abc','111'] │
└───────────────┘
```
## extractAllGroupsVertical
Matches all groups of the `haystack` string using the `pattern` regular expression. Returns an array of arrays, where each array includes matching fragments from every group. Fragments are grouped in order of appearance in the `haystack`.
**Syntax**

```sql
extractAllGroupsVertical(haystack, pattern)
```
**Arguments**
- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. [String](../data-types/string.md).
**Returned value**
## countMatches

```sql
countMatches(haystack, pattern)
```
**Arguments**
- `haystack` — The string to search in. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `pattern` — The regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). [String](../data-types/string.md).
**Returned value**
## countMatchesCaseInsensitive

```sql
countMatchesCaseInsensitive(haystack, pattern)
```
**Arguments**
- `haystack` — The string to search in. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `pattern` — The regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). [String](../data-types/string.md).
**Returned value**

Also see the `toUnixTimestamp` function.
## toFixedString
Converts a [String](../data-types/string.md) type argument to a [FixedString(N)](../data-types/fixedstring.md) type (a string of fixed length N).
If the string has fewer bytes than N, it is padded with null bytes to the right. If the string has more bytes than N, an exception is thrown.
**Syntax**
```sql
toFixedString(s, N)
```
**Arguments**
- `s` — A String to convert to a fixed string. [String](../data-types/string.md).
- `N` — The target length N. [UInt8](../data-types/int-uint.md).
**Returned value**
- A fixed string of length N containing `s`. [FixedString](../data-types/fixedstring.md).
**Example**
Query:
``` sql
SELECT toFixedString('foo', 8) AS s;
```
Result:
```response
┌─s─────────────┐
│ foo\0\0\0\0\0 │
└───────────────┘
```
## toStringCutToZero
Accepts a String or FixedString argument. Returns the String with the content truncated at the first zero byte found.
**Syntax**
```sql
toStringCutToZero(s)
```
```sql
ORDER BY ts, event_type;
│ 2020-01-02 00:00:00 │ imp │ 2 │
└─────────────────────┴────────────┴─────────────────┘
-- Let's add the new measurement `cost`
-- and the new dimension `browser`.
ALTER TABLE events

```

The `CHECK TABLE` query supports the following table engines:
- [StripeLog](../../engines/table-engines/log-family/stripelog.md)
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)
Performing it over tables with other table engines causes a `NOT_IMPLEMENTED` exception.
Engines from the `*Log` family do not provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner.
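For example (the table name is illustrative):
```sql
CHECK TABLE test_table;
```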

DEFLATE_QPL is not available in ClickHouse Cloud.
### Specialized Codecs
These codecs are designed to make compression more effective by exploiting specific features of the data. Some of these codecs do not compress data themselves, they instead preprocess the data such that a second compression stage using a general-purpose codec can achieve a higher data compression rate.
#### Delta

Differences from regular non-refreshable materialized views:
The settings in the `REFRESH ... SETTINGS` part of the query are refresh settings (e.g. `refresh_retries`), distinct from regular settings (e.g. `max_threads`). Regular settings can be specified using `SETTINGS` at the end of the query.
:::
### Refresh Schedule
Example refresh schedules:
```sql
REFRESH EVERY 1 MONTH OFFSET 5 DAY 2 HOUR -- on 6th day of every month, at 2:00
REFRESH EVERY 2 WEEK OFFSET 5 DAY 15 HOUR 10 MINUTE -- every other Saturday, at 3:10 pm
REFRESH EVERY 30 MINUTE -- at 00:00, 00:30, 01:00, 01:30, etc
REFRESH AFTER 30 MINUTE -- 30 minutes after the previous refresh completes, no alignment with time of day
-- REFRESH AFTER 1 HOUR OFFSET 1 MINUTE -- syntax error, OFFSET is not allowed with AFTER
REFRESH EVERY 1 WEEK 2 DAYS -- every 9 days, not on any particular day of the week or month;
-- specifically, when day number (since 1969-12-29) is divisible by 9
REFRESH EVERY 5 MONTHS -- every 5 months, different months each year (as 12 is not divisible by 5);
-- specifically, when month number (since 1970-01) is divisible by 5
```
`RANDOMIZE FOR` randomly adjusts the time of each refresh.

At most one refresh may be running at a time, for a given view.
Additionally, a refresh is started immediately after the materialized view is created, unless `EMPTY` is specified in the `CREATE` query. If `EMPTY` is specified, the first refresh happens according to schedule.
### In Replicated DB
If the refreshable materialized view is in a [Replicated database](../../../engines/database-engines/replicated.md), the replicas coordinate with each other such that only one replica performs the refresh at each scheduled time. [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) table engine is required, so that all replicas see the data produced by the refresh.
In `APPEND` mode, coordination can be disabled using `SETTINGS all_replicas = 1`. This makes replicas do refreshes independently of each other. In this case ReplicatedMergeTree is not required.
In non-`APPEND` mode, only coordinated refreshing is supported. For uncoordinated, use `Atomic` database and `CREATE ... ON CLUSTER` query to create refreshable materialized views on all replicas.
The coordination is done through Keeper. The znode path is determined by [default_replica_path](../../../operations/server-configuration-parameters/settings.md#default_replica_path) server setting.
### Dependencies {#refresh-dependencies}
`DEPENDS ON` synchronizes refreshes of different tables. By way of example, suppose there's a chain of two refreshable materialized views:
The status of all refreshable materialized views is available in the [`system.view_refreshes`](../../../operations/system-tables/view_refreshes.md) table.
To manually stop, start, trigger, or cancel refreshes use [`SYSTEM STOP|START|REFRESH|CANCEL VIEW`](../system.md#refreshable-materialized-views).
To wait for a refresh to complete, use [`SYSTEM WAIT VIEW`](../system.md#refreshable-materialized-views). This is particularly useful for waiting for the initial refresh after creating a view.
:::note
Fun fact: the refresh query is allowed to read from the view that's being refreshed, seeing pre-refresh version of the data. This means you can implement Conway's game of life: https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA==
:::

Hierarchy of privileges:
- `addressToSymbol`
- `demangle`
- [SOURCES](#sources)
- `AZURE`
- `FILE`
- `HDFS`
- `HIVE`
- `JDBC`
- `MONGO`
- `MYSQL`
- `ODBC`
- `POSTGRES`
- `REDIS`
- `REMOTE`
- `S3`
- `SQLITE`
- `URL`
- [dictGet](#dictget)
- [displaySecretsInShowAndSelect](#displaysecretsinshowandselect)
- [NAMED COLLECTION ADMIN](#named-collection-admin)
Allows using external data sources. Applies to [table engines](../../engines/table-engines/index.md) and [table functions](../../sql-reference/table-functions/index.md#table-functions).
- `SOURCES`. Level: `GROUP`
- `AZURE`. Level: `GLOBAL`
- `FILE`. Level: `GLOBAL`
- `HDFS`. Level: `GLOBAL`
- `HIVE`. Level: `GLOBAL`
- `JDBC`. Level: `GLOBAL`
- `MONGO`. Level: `GLOBAL`
- `MYSQL`. Level: `GLOBAL`
- `ODBC`. Level: `GLOBAL`
- `POSTGRES`. Level: `GLOBAL`
- `REDIS`. Level: `GLOBAL`
- `REMOTE`. Level: `GLOBAL`
- `S3`. Level: `GLOBAL`
- `SQLITE`. Level: `GLOBAL`
- `URL`. Level: `GLOBAL`
The `SOURCES` privilege enables use of all the sources. You can also grant a privilege for each source individually. To use sources, you need additional privileges.

The condition could be any expression based on your requirements.
Here is a simple example that intersects the numbers 1 to 10 with the numbers 3 to 8:
```sql
SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,8);
```
Result:

Shows privileges for a user.
**Syntax**
``` sql
SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL]
```
If user is not specified, the query returns privileges for the current user.

The `WITH IMPLICIT` modifier allows showing the implicit grants (e.g., `GRANT SELECT ON system.one`).

The `FINAL` modifier merges all grants from the user and its granted roles (with inheritance).
## SHOW CREATE USER
Shows the parameters that were used at [user creation](../../sql-reference/statements/create/user.md).

If there's a refresh in progress for the given view, interrupt and cancel it. Otherwise do nothing.
```sql
SYSTEM CANCEL VIEW [db.]name
```
### SYSTEM WAIT VIEW
Waits for the running refresh to complete. If no refresh is running, returns immediately. If the latest refresh attempt failed, reports an error.
Can be used right after creating a new refreshable materialized view (without EMPTY keyword) to wait for the initial refresh to complete.
```sql
SYSTEM WAIT VIEW [db.]name
```

# iceberg Table Function
Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure, HDFS or locally stored.
## Syntax
```sql
icebergS3(named_collection[, option=value [,..]])
icebergAzure(connection_string|storage_account_url, container_name, blobpath, [,account_name], [,account_key] [,format] [,compression_method])
icebergAzure(named_collection[, option=value [,..]])
icebergHDFS(path_to_table, [,format] [,compression_method])
icebergHDFS(named_collection[, option=value [,..]])
icebergLocal(path_to_table, [,format] [,compression_method])
icebergLocal(named_collection[, option=value [,..]])
```
## Arguments
Description of the arguments coincides with description of arguments in table functions `s3`, `azureBlobStorage`, `HDFS` and `file` correspondingly.
`format` stands for the format of data files in the Iceberg table.
**Returned value**
A table with the specified structure for reading data in the specified Iceberg table.
```
:::important
ClickHouse currently supports reading v1 and v2 of the Iceberg format via the `icebergS3`, `icebergAzure`, `icebergHDFS` and `icebergLocal` table functions and the `IcebergS3`, `IcebergAzure`, `IcebergHDFS` and `IcebergLocal` table engines.
:::
## Defining a named collection
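A sketch of such a named collection and its use (the collection name, URL and credentials are illustrative):
```xml
<clickhouse>
    <named_collections>
        <iceberg_conf>
            <url>http://test.s3.amazonaws.com/clickhouse-bucket/</url>
            <access_key_id>test</access_key_id>
            <secret_access_key>test</secret_access_key>
        </iceberg_conf>
    </named_collections>
</clickhouse>
```

```sql
SELECT * FROM icebergS3(iceberg_conf, filename = 'test_table');
```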

## Open Source {#s-otkrytym-iskhodnym-kodom}
### ChartDB {#chartdb}
[ChartDB](https://chartdb.io) is a free and open-source tool for visualizing and designing database schemas, including ClickHouse, with a single query. Built on React, it provides a convenient and simple interface and requires no credentials or signup.

Key features:

- Schema visualization: instantly import and visualize your ClickHouse schema, including ER diagrams with materialized views and standard views that show references to tables;
- AI-powered DDL export: easily generate DDL scripts for better schema management and documentation;
- Support for various SQL dialects: compatible with a wide range of SQL dialects, making it versatile for different database environments;
- No signup or credentials needed: all functionality is available directly in the browser, keeping usage seamless and secure.

[ChartDB source code](https://github.com/chartdb/chartdb).
### Tabix {#tabix}
Web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) project.

# User Settings {#nastroiki-polzovatelei}

The `users` section of the `users.xml` configuration file contains user settings.
:::note Info
For managing users, it is recommended to use the [SQL-driven workflow](../access-rights.md#access-control), which is also supported in ClickHouse.

```sql
WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1);
```
## -Distinct {#agg-functions-combinator-distinct}
With the `Distinct` combinator, every unique combination of argument values is taken into account by the aggregate function only once.

Examples: `sum(DISTINCT x)` (or `sumDistinct(x)`), `groupArray(DISTINCT x)` (or `groupArrayDistinct(x)`), `corrStable(DISTINCT x, y)` (or `corrStableDistinct(x, y)`) and so on.
## -OrDefault {#agg-functions-combinator-ordefault}

```sql
GRANT INSERT(x,y) ON db.table TO john
```
- `FILE`. Level: `GLOBAL`
- `URL`. Level: `GLOBAL`
- `REMOTE`. Level: `GLOBAL`
- `MYSQL`. Level: `GLOBAL`
- `ODBC`. Level: `GLOBAL`
- `JDBC`. Level: `GLOBAL`
- `HDFS`. Level: `GLOBAL`

Query:
``` sql
SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,8);
```
Result:

### Syntax {#show-grants-syntax}
``` sql
SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL]
```
If the user is not specified, the query returns the privileges of the current user.

`WITH IMPLICIT` adds the implicit privileges (for example, `GRANT SELECT ON system.one`).

`FINAL` merges all current privileges with the privileges of all the user's roles (with inheritance).
## SHOW CREATE USER {#show-create-user-statement}

## Open Source {#kai-yuan}
### ChartDB {#chartdb}
[ChartDB](https://chartdb.io) is a database schema visualization tool for ClickHouse.

Key features:

- Import and visualize a ClickHouse schema with one click, including ER diagrams with materialized views and standard views, showing references between tables.
- AI-powered DDL export for easier schema management and documentation.
- Support for multiple SQL dialects, suitable for various database environments.
- Works directly in the browser with no database credentials or signup required, simple and secure to use.

[ChartDB source code](https://github.com/chartdb/chartdb).
### Tabix {#tabix}
Web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) project.

# User Settings {#user-settings}

The `users` section in `users.xml` contains the user settings.
:::note
ClickHouse also supports an [SQL-driven workflow](/docs/en/operations/access-rights#access-control) for managing users. We recommend using it.

```sql
SELECT bitTest(43, 2);
```
## bitTestAll {#bittestall}
Returns the result of the [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operation) of all bits at the given positions. Bits are counted from right to left, starting at 0.
The result of the AND operation:

### Syntax {#show-grants-syntax}
``` sql
SHOW GRANTS [FOR user]
SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL]
```
If no user is specified, the privileges of the current user are shown

View File

@ -155,7 +155,7 @@ function _clickhouse_quote()
function _clickhouse_get_options()
{
# By default --help will not print all settings, this is done only under --verbose
"$@" --help --verbose 2>&1 | awk -F '[ ,=<>.]' '{ for (i=1; i <= NF; ++i) { if (substr($i, 1, 1) == "-" && length($i) > 1) print $i; } }' | sort -u
"$@" --help --verbose 2>&1 | LANG=c awk -F '[ ,=<>.]' '{ for (i=1; i <= NF; ++i) { if (substr($i, 1, 1) == "-" && length($i) > 1) print $i; } }' | sort -u
}
function _complete_for_clickhouse_generic_bin_impl()

View File

@ -445,15 +445,13 @@ private:
shutdown = true;
throw;
}
else
{
std::cerr << getCurrentExceptionMessage(print_stacktrace,
true /*check embedded stack trace*/) << std::endl;
size_t info_index = round_robin ? 0 : connection_index;
++comparison_info_per_interval[info_index]->errors;
++comparison_info_total[info_index]->errors;
}
std::cerr << getCurrentExceptionMessage(print_stacktrace,
true /*check embedded stack trace*/) << std::endl;
size_t info_index = round_robin ? 0 : connection_index;
++comparison_info_per_interval[info_index]->errors;
++comparison_info_total[info_index]->errors;
}
// Count failed queries toward executed, so that we'd reach
// max_iterations even if every run fails.

View File

@ -346,7 +346,9 @@ try
processConfig();
adjustSettings();
initTTYBuffer(toProgressOption(config().getString("progress", "default")));
initTTYBuffer(toProgressOption(config().getString("progress", "default")),
toProgressOption(config().getString("progress-table", "default")));
initKeystrokeInterceptor();
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
{
@ -478,27 +480,23 @@ void Client::connect()
}
catch (const Exception & e)
{
/// This problem can't be fixed with reconnection so it is not attempted
if (e.code() == DB::ErrorCodes::AUTHENTICATION_FAILED)
{
/// This problem can't be fixed with reconnection so it is not attempted
throw;
}
else
{
if (attempted_address_index == hosts_and_ports.size() - 1)
throw;
if (is_interactive)
{
std::cerr << "Connection attempt to database at "
<< connection_parameters.host << ":" << connection_parameters.port
<< " resulted in failure"
<< std::endl
<< getExceptionMessage(e, false)
<< std::endl
<< "Attempting connection to the next provided address"
<< std::endl;
}
if (attempted_address_index == hosts_and_ports.size() - 1)
throw;
if (is_interactive)
{
std::cerr << "Connection attempt to database at "
<< connection_parameters.host << ":" << connection_parameters.port
<< " resulted in failure"
<< std::endl
<< getExceptionMessage(e, false)
<< std::endl
<< "Attempting connection to the next provided address"
<< std::endl;
}
}
}
@ -772,7 +770,7 @@ bool Client::processWithFuzzing(const String & full_query)
else
this_query_runs = 1;
}
else if (const auto * insert = orig_ast->as<ASTInsertQuery>())
else if (const auto * /*insert*/ _ = orig_ast->as<ASTInsertQuery>())
{
this_query_runs = 1;
queries_for_fuzzed_tables = fuzzer.getInsertQueriesForFuzzedTables(full_query);

View File

@ -46,7 +46,7 @@ public:
path_from,
disk_from.getDisk()->getName());
}
else if (disk_from.getDisk()->isFile(path_from))
if (disk_from.getDisk()->isFile(path_from))
{
auto target_location = getTargetLocation(path_from, disk_to, path_to);
if (!disk_to.getDisk()->exists(target_location) || disk_to.getDisk()->isFile(target_location))
@ -77,7 +77,7 @@ public:
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot overwrite non-directory {} with directory {}", path_to, target_location);
}
else if (!disk_to.getDisk()->exists(target_location))
if (!disk_to.getDisk()->exists(target_location))
{
disk_to.getDisk()->createDirectory(target_location);
}

View File

@ -72,13 +72,9 @@ private:
auto path = [&]() -> String
{
if (relative_path.ends_with("/"))
{
return relative_path + file_name;
}
else
{
return relative_path + "/" + file_name;
}
return relative_path + "/" + file_name;
}();
if (disk.isDirectory(path))
{

View File

@ -53,10 +53,8 @@ public:
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot move '{}' to '{}': Directory not empty", path_from, target_location);
}
else
{
disk.getDisk()->moveDirectory(path_from, target_location);
}
disk.getDisk()->moveDirectory(path_from, target_location);
}
}
else if (!disk.getDisk()->exists(path_from))

View File

@ -32,16 +32,14 @@ public:
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} on disk {} doesn't exist", path, disk.getDisk()->getName());
}
else if (disk.getDisk()->isDirectory(path))
if (disk.getDisk()->isDirectory(path))
{
if (!recursive)
{
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot remove '{}': Is a directory", path);
}
else
{
disk.getDisk()->removeRecursive(path);
}
disk.getDisk()->removeRecursive(path);
}
else
{

View File

@ -33,14 +33,10 @@ public:
auto in = [&]() -> std::unique_ptr<ReadBufferFromFileBase>
{
if (!path_from.has_value())
{
return std::make_unique<ReadBufferFromFileDescriptor>(STDIN_FILENO);
}
else
{
String relative_path_from = disk.getRelativeFromRoot(path_from.value());
return disk.getDisk()->readFile(relative_path_from, getReadSettings());
}
String relative_path_from = disk.getRelativeFromRoot(path_from.value());
return disk.getDisk()->readFile(relative_path_from, getReadSettings());
}();
auto out = disk.getDisk()->writeFile(path_to);

View File

@ -127,68 +127,58 @@ std::vector<String> DisksApp::getCompletions(const String & prefix) const
}
return getEmptyCompletion(command->command_name);
}
else if (arguments.size() == 1)
if (arguments.size() == 1)
{
String command_prefix = arguments[0];
return getCommandsToComplete(command_prefix);
}
else
{
String last_token = arguments.back();
CommandPtr command;
try
{
command = getCommandByName(arguments[0]);
}
catch (...)
{
return {last_token};
}
std::vector<String> answer = {};
if (command->command_name == "help")
{
return getCommandsToComplete(last_token);
}
else
{
answer = [&]() -> std::vector<String>
{
if (multidisk_commands.contains(command->command_name))
{
return client->getAllFilesByPatternFromAllDisks(last_token);
}
else
{
return client->getCurrentDiskWithPath().getAllFilesByPattern(last_token);
}
}();
for (const auto & disk_name : client->getAllDiskNames())
{
if (disk_name.starts_with(last_token))
{
answer.push_back(disk_name);
}
}
for (const auto & option : command->options_description.options())
{
String option_sign = "--" + option->long_name();
if (option_sign.starts_with(last_token))
{
answer.push_back(option_sign);
}
}
}
if (!answer.empty())
String last_token = arguments.back();
CommandPtr command;
try
{
command = getCommandByName(arguments[0]);
}
catch (...)
{
return {last_token};
}
std::vector<String> answer = {};
if (command->command_name == "help")
return getCommandsToComplete(last_token);
answer = [&]() -> std::vector<String>
{
if (multidisk_commands.contains(command->command_name))
return client->getAllFilesByPatternFromAllDisks(last_token);
return client->getCurrentDiskWithPath().getAllFilesByPattern(last_token);
}();
for (const auto & disk_name : client->getAllDiskNames())
{
if (disk_name.starts_with(last_token))
{
std::sort(answer.begin(), answer.end());
return answer;
}
else
{
return {last_token};
answer.push_back(disk_name);
}
}
for (const auto & option : command->options_description.options())
{
String option_sign = "--" + option->long_name();
if (option_sign.starts_with(last_token))
{
answer.push_back(option_sign);
}
}
if (!answer.empty())
{
std::sort(answer.begin(), answer.end());
return answer;
}
return {last_token};
}
bool DisksApp::processQueryText(const String & text)
@ -210,11 +200,11 @@ bool DisksApp::processQueryText(const String & text)
catch (DB::Exception & err)
{
int code = getCurrentExceptionCode();
if (code == ErrorCodes::LOGICAL_ERROR)
{
throw std::move(err);
}
else if (code == ErrorCodes::BAD_ARGUMENTS)
if (code == ErrorCodes::BAD_ARGUMENTS)
{
std::cerr << err.message() << "\n"
<< "\n";

View File

@ -49,10 +49,8 @@ std::vector<String> DiskWithPath::listAllFilesByPath(const String & any_path) co
disk->listFiles(getRelativeFromRoot(any_path), file_names);
return file_names;
}
else
{
return {};
}
return {};
}
std::vector<String> DiskWithPath::getAllFilesByPattern(const String & pattern) const
@ -61,39 +59,30 @@ std::vector<String> DiskWithPath::getAllFilesByPattern(const String & pattern) c
{
auto slash_pos = pattern.find_last_of('/');
if (slash_pos >= pattern.size())
{
return {"", pattern};
}
else
{
return {pattern.substr(0, slash_pos + 1), pattern.substr(slash_pos + 1, pattern.size() - slash_pos - 1)};
}
return {pattern.substr(0, slash_pos + 1), pattern.substr(slash_pos + 1, pattern.size() - slash_pos - 1)};
}();
if (!isDirectory(path_before))
{
return {};
}
else
std::vector<String> file_names = listAllFilesByPath(path_before);
std::vector<String> answer;
for (const auto & file_name : file_names)
{
std::vector<String> file_names = listAllFilesByPath(path_before);
std::vector<String> answer;
for (const auto & file_name : file_names)
if (file_name.starts_with(path_after))
{
if (file_name.starts_with(path_after))
String file_pattern = path_before + file_name;
if (isDirectory(file_pattern))
{
String file_pattern = path_before + file_name;
if (isDirectory(file_pattern))
{
file_pattern = file_pattern + "/";
}
answer.push_back(file_pattern);
file_pattern = file_pattern + "/";
}
answer.push_back(file_pattern);
}
return answer;
}
return answer;
};
void DiskWithPath::setPath(const String & any_path)

View File

@ -39,13 +39,9 @@ DiskWithPath & ICommand::getDiskWithPath(DisksClient & client, const CommandLine
{
auto disk_name = getValueFromCommandLineOptionsWithOptional<String>(options, name);
if (disk_name.has_value())
{
return client.getDiskWithPath(disk_name.value());
}
else
{
return client.getCurrentDiskWithPath();
}
return client.getCurrentDiskWithPath();
}
}

View File

@ -63,39 +63,27 @@ protected:
static T getValueFromCommandLineOptionsThrow(const CommandLineOptions & options, const String & name)
{
if (options.count(name))
{
return getValueFromCommandLineOptions<T>(options, name);
}
else
{
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Mandatory argument '{}' is missing", name);
}
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Mandatory argument '{}' is missing", name);
}
template <typename T>
static T getValueFromCommandLineOptionsWithDefault(const CommandLineOptions & options, const String & name, const T & default_value)
{
if (options.count(name))
{
return getValueFromCommandLineOptions<T>(options, name);
}
else
{
return default_value;
}
return default_value;
}
template <typename T>
static std::optional<T> getValueFromCommandLineOptionsWithOptional(const CommandLineOptions & options, const String & name)
{
if (options.count(name))
{
return std::optional{getValueFromCommandLineOptions<T>(options, name)};
}
else
{
return std::nullopt;
}
return std::nullopt;
}
DiskWithPath & getDiskWithPath(DisksClient & client, const CommandLineOptions & options, const String & name);

View File

@ -110,8 +110,8 @@ static auto executeScript(const std::string & command, bool throw_on_error = fal
sh->wait();
return 0;
}
else
return sh->tryWait();
return sh->tryWait();
}
static bool ask(std::string question)

View File

@ -26,17 +26,15 @@ CatBoostLibraryHandlerPtr CatBoostLibraryHandlerFactory::tryGetModel(const Strin
if (found)
return handler->second;
else
if (create_if_not_found)
{
if (create_if_not_found)
{
auto new_handler = std::make_shared<CatBoostLibraryHandler>(library_path, model_path);
library_handlers.emplace(model_path, new_handler);
LOG_DEBUG(log, "Loaded catboost library handler for model path '{}'", model_path);
return new_handler;
}
return nullptr;
auto new_handler = std::make_shared<CatBoostLibraryHandler>(library_path, model_path);
library_handlers.emplace(model_path, new_handler);
LOG_DEBUG(log, "Loaded catboost library handler for model path '{}'", model_path);
return new_handler;
}
return nullptr;
}
void CatBoostLibraryHandlerFactory::removeModel(const String & model_path)

View File

@ -25,7 +25,7 @@ std::unique_ptr<HTTPRequestHandler> LibraryBridgeHandlerFactory::createRequestHa
{
if (uri.getPath() == "/extdict_ping")
return std::make_unique<ExternalDictionaryLibraryBridgeExistsHandler>(getContext());
else if (uri.getPath() == "/catboost_ping")
if (uri.getPath() == "/catboost_ping")
return std::make_unique<CatBoostLibraryBridgeExistsHandler>(getContext());
}
@ -33,7 +33,7 @@ std::unique_ptr<HTTPRequestHandler> LibraryBridgeHandlerFactory::createRequestHa
{
if (uri.getPath() == "/extdict_request")
return std::make_unique<ExternalDictionaryLibraryBridgeRequestHandler>(getContext());
else if (uri.getPath() == "/catboost_request")
if (uri.getPath() == "/catboost_request")
return std::make_unique<CatBoostLibraryBridgeRequestHandler>(getContext());
}

View File

@ -158,11 +158,9 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ
out.finalize();
return;
}
else
{
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call extDict_libNew instead", from_dictionary_id);
lib_new = true;
}
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call extDict_libNew instead", from_dictionary_id);
lib_new = true;
}
if (lib_new)
{

View File

@ -518,7 +518,9 @@ try
SCOPE_EXIT({ cleanup(); });
initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default")));
initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default")),
toProgressOption(config().getString("progress-table", "default")));
initKeystrokeInterceptor();
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
/// try to load user defined executable functions, throw on error and die

View File

@ -231,8 +231,8 @@ static Int64 transformSigned(Int64 x, UInt64 seed)
{
if (x >= 0)
return transform(x, seed);
else
return -transform(-x, seed); /// It works Ok even for minimum signed number.
return -transform(-x, seed); /// It works Ok even for minimum signed number.
}
@ -1105,8 +1105,8 @@ public:
{
if (isUInt(data_type))
return std::make_unique<UnsignedIntegerModel>(seed);
else
return std::make_unique<SignedIntegerModel>(seed);
return std::make_unique<SignedIntegerModel>(seed);
}
if (typeid_cast<const DataTypeFloat32 *>(&data_type))

View File

@ -39,13 +39,13 @@ IdentifierQuotingStyle getQuotingStyle(nanodbc::ConnectionHolderPtr connection)
auto identifier_quote = getIdentifierQuote(connection);
if (identifier_quote.empty())
return IdentifierQuotingStyle::Backticks;
else if (identifier_quote[0] == '`')
if (identifier_quote[0] == '`')
return IdentifierQuotingStyle::Backticks;
else if (identifier_quote[0] == '"')
if (identifier_quote[0] == '"')
return IdentifierQuotingStyle::DoubleQuotes;
else
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Can not map quote identifier '{}' to IdentifierQuotingStyle value", identifier_quote);
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
"Can not map quote identifier '{}' to IdentifierQuotingStyle value", identifier_quote);
}
}
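For context, the two quoting styles this helper maps to look like this at the SQL level; an illustrative sketch, not part of the patch:
``` sql
SELECT `id` FROM `events`;   -- IdentifierQuotingStyle::Backticks (MySQL-style)
SELECT "id" FROM "events";   -- IdentifierQuotingStyle::DoubleQuotes (standard SQL)
```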

View File

@ -127,8 +127,8 @@ std::string validateODBCConnectionString(const std::string & connection_string)
if (*pos == '{')
return read_escaped_value();
else
return read_plain_value();
return read_plain_value();
};
std::map<std::string, std::string> parameters;
@ -206,12 +206,10 @@ std::string validateODBCConnectionString(const std::string & connection_string)
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
break;
}
else
{
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
reconstructed_connection_string.append("}}");
value_pos = next_pos + 1;
}
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
reconstructed_connection_string.append("}}");
value_pos = next_pos + 1;
}
reconstructed_connection_string += '}';

View File

@ -69,6 +69,7 @@
#include <Interpreters/registerInterpreters.h>
#include <Interpreters/JIT/CompiledExpressionCache.h>
#include <Access/AccessControl.h>
#include <Storages/MaterializedView/RefreshSet.h>
#include <Storages/MergeTree/MergeTreeSettings.h>
#include <Storages/StorageReplicatedMergeTree.h>
#include <Storages/System/attachSystemTables.h>
@ -158,6 +159,11 @@ namespace Setting
extern const SettingsSeconds send_timeout;
}
namespace MergeTreeSetting
{
extern const MergeTreeSettingsBool allow_remote_fs_zero_copy_replication;
}
}
namespace CurrentMetrics
@ -599,7 +605,7 @@ void sanityChecks(Server & server)
{
}
if (server.context()->getMergeTreeSettings().allow_remote_fs_zero_copy_replication)
if (server.context()->getMergeTreeSettings()[MergeTreeSetting::allow_remote_fs_zero_copy_replication])
{
server.context()->addWarningMessage("The setting 'allow_remote_fs_zero_copy_replication' is enabled for MergeTree tables."
" But the feature of 'zero-copy replication' is under development and is not ready for production."
@ -1129,9 +1135,6 @@ try
/// We need to reload server settings because config could be updated via zookeeper.
server_settings.loadSettingsFromConfig(config());
/// NOTE: Do sanity checks after we loaded all possible substitutions (for the configuration) from ZK
sanityChecks(*this);
#if defined(OS_LINUX)
std::string executable_path = getExecutablePath();
@ -1493,6 +1496,8 @@ try
NamedCollectionFactory::instance().loadIfNot();
FileCacheFactory::instance().loadDefaultCaches(config());
/// Initialize main config reloader.
std::string include_from_path = config().getString("include_from", "/etc/metrika.xml");
@ -2023,6 +2028,11 @@ try
if (!filesystem_caches_path.empty())
global_context->setFilesystemCachesPath(filesystem_caches_path);
/// NOTE: Do sanity checks after we loaded all possible substitutions (for the configuration) from ZK
/// Additionally, making the check after the default profile is initialized.
/// It is important to initialize MergeTreeSettings after Settings, to support compatibility for MergeTreeSettings.
sanityChecks(*this);
/// Check sanity of MergeTreeSettings on server startup
{
size_t background_pool_tasks = global_context->getMergeMutateExecutor()->getMaxTasksCount();
@ -2076,6 +2086,12 @@ try
try
{
/// Don't run background queries until we loaded tables.
/// (In particular things would break if a background drop query happens before the
/// loadMarkedAsDroppedTables() call below - it'll see dropped table metadata and try to
/// drop the table a second time and throw an exception.)
global_context->getRefreshSet().setRefreshesStopped(true);
auto & database_catalog = DatabaseCatalog::instance();
/// We load temporary database first, because projections need it.
database_catalog.initializeAndLoadTemporaryDatabase();
@ -2115,6 +2131,8 @@ try
database_catalog.assertDatabaseExists(default_database);
/// Load user-defined SQL functions.
global_context->getUserDefinedSQLObjectsStorage().loadObjects();
global_context->getRefreshSet().setRefreshesStopped(false);
}
catch (...)
{

View File

@ -116,16 +116,14 @@ namespace
reading_dependents = false;
continue;
}
else if (line == "DEPENDENTS")
if (line == "DEPENDENTS")
{
reading_dependents = true;
reading_dependencies = false;
continue;
}
else if (line.empty())
{
if (line.empty())
continue;
}
size_t separator1 = line.find('\t');
size_t separator2 = line.find('\t', separator1 + 1);

View File

@ -22,8 +22,10 @@
#include <Backups/RestorerFromBackup.h>
#include <Core/Settings.h>
#include <base/defines.h>
#include <base/range.h>
#include <IO/Operators.h>
#include <Common/re2.h>
#include <Poco/AccessExpireCache.h>
#include <boost/algorithm/string/join.hpp>
#include <filesystem>
@ -132,8 +134,8 @@ public:
"' registered for user-defined settings",
String{setting_name}, boost::algorithm::join(registered_prefixes, "' or '"));
}
else
BaseSettingsHelpers::throwSettingNotFound(setting_name);
throw Exception(ErrorCodes::UNKNOWN_SETTING, "Unknown setting '{}'", String{setting_name});
}
private:
@ -828,8 +830,7 @@ std::shared_ptr<const EnabledQuota> AccessControl::getAuthenticationQuota(
quota_key,
throw_if_client_key_empty);
}
else
return nullptr;
return nullptr;
}

View File

@ -1155,9 +1155,6 @@ private:
calculateMinMaxFlags();
if (!isLeaf())
return;
auto new_flags = function(flags, min_flags_with_children, max_flags_with_children, level, grant_option);
if (new_flags != flags)
@ -1483,17 +1480,17 @@ bool AccessRights::isGrantedImplHelper(const AccessRightsElement & element) cons
{
if (element.anyParameter())
return isGrantedImpl<grant_option, wildcard>(element.access_flags);
else
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.parameter);
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.parameter);
}
else if (element.anyDatabase())
if (element.anyDatabase())
return isGrantedImpl<grant_option, wildcard>(element.access_flags);
else if (element.anyTable())
if (element.anyTable())
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database);
else if (element.anyColumn())
if (element.anyColumn())
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database, element.table);
else
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database, element.table, element.columns);
return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database, element.table, element.columns);
}
template <bool grant_option, bool wildcard>
@ -1503,16 +1500,14 @@ bool AccessRights::isGrantedImpl(const AccessRightsElement & element) const
{
if (element.grant_option)
return isGrantedImplHelper<true, true>(element);
else
return isGrantedImplHelper<grant_option, true>(element);
}
else
{
if (element.grant_option)
return isGrantedImplHelper<true, wildcard>(element);
else
return isGrantedImplHelper<grant_option, wildcard>(element);
return isGrantedImplHelper<grant_option, true>(element);
}
if (element.grant_option)
return isGrantedImplHelper<true, wildcard>(element);
return isGrantedImplHelper<grant_option, wildcard>(element);
}
template <bool grant_option, bool wildcard>

View File

@ -117,20 +117,20 @@ bool operator ==(const AuthenticationData & lhs, const AuthenticationData & rhs)
}
void AuthenticationData::setPassword(const String & password_)
void AuthenticationData::setPassword(const String & password_, bool validate)
{
switch (type)
{
case AuthenticationType::PLAINTEXT_PASSWORD:
setPasswordHashBinary(Util::stringToDigest(password_));
setPasswordHashBinary(Util::stringToDigest(password_), validate);
return;
case AuthenticationType::SHA256_PASSWORD:
setPasswordHashBinary(Util::encodeSHA256(password_));
setPasswordHashBinary(Util::encodeSHA256(password_), validate);
return;
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
setPasswordHashBinary(Util::encodeDoubleSHA1(password_));
setPasswordHashBinary(Util::encodeDoubleSHA1(password_), validate);
return;
case AuthenticationType::BCRYPT_PASSWORD:
@ -149,12 +149,12 @@ void AuthenticationData::setPassword(const String & password_)
throw Exception(ErrorCodes::NOT_IMPLEMENTED, "setPassword(): authentication type {} not supported", toString(type));
}
void AuthenticationData::setPasswordBcrypt(const String & password_, int workfactor_)
void AuthenticationData::setPasswordBcrypt(const String & password_, int workfactor_, bool validate)
{
if (type != AuthenticationType::BCRYPT_PASSWORD)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot specify bcrypt password for authentication type {}", toString(type));
setPasswordHashBinary(Util::encodeBcrypt(password_, workfactor_));
setPasswordHashBinary(Util::encodeBcrypt(password_, workfactor_), validate);
}
String AuthenticationData::getPassword() const
@ -165,7 +165,7 @@ String AuthenticationData::getPassword() const
}
void AuthenticationData::setPasswordHashHex(const String & hash)
void AuthenticationData::setPasswordHashHex(const String & hash, bool validate)
{
Digest digest;
digest.resize(hash.size() / 2);
@ -179,7 +179,7 @@ void AuthenticationData::setPasswordHashHex(const String & hash)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Cannot read password hash in hex, check for valid characters [0-9a-fA-F] and length");
}
setPasswordHashBinary(digest);
setPasswordHashBinary(digest, validate);
}
@ -195,7 +195,7 @@ String AuthenticationData::getPasswordHashHex() const
}
void AuthenticationData::setPasswordHashBinary(const Digest & hash)
void AuthenticationData::setPasswordHashBinary(const Digest & hash, bool validate)
{
switch (type)
{
@ -217,7 +217,7 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
case AuthenticationType::DOUBLE_SHA1_PASSWORD:
{
if (hash.size() != 20)
if (validate && hash.size() != 20)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Password hash for the 'DOUBLE_SHA1_PASSWORD' authentication type has length {} "
"but must be exactly 20 bytes.", hash.size());
@ -231,7 +231,7 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
/// However the library we use to encode it requires hash string to be 64 characters long,
/// so we also allow the hash of this length.
if (hash.size() != 59 && hash.size() != 60 && hash.size() != 64)
if (validate && hash.size() != 59 && hash.size() != 60 && hash.size() != 64)
throw Exception(ErrorCodes::BAD_ARGUMENTS,
"Password hash for the 'BCRYPT_PASSWORD' authentication type has length {} "
"but must be 59 or 60 bytes.", hash.size());
@ -240,10 +240,13 @@ void AuthenticationData::setPasswordHashBinary(const Digest & hash)
resized.resize(64);
#if USE_BCRYPT
/// Verify that it is a valid hash
int ret = bcrypt_checkpw("", reinterpret_cast<const char *>(resized.data()));
if (ret == -1)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Could not decode the provided hash with 'bcrypt_hash'");
if (validate)
{
/// Verify that it is a valid hash
int ret = bcrypt_checkpw("", reinterpret_cast<const char *>(resized.data()));
if (ret == -1)
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Could not decode the provided hash with 'bcrypt_hash'");
}
#endif
password_hash = hash;
@ -385,7 +388,7 @@ std::shared_ptr<ASTAuthenticationData> AuthenticationData::toAST() const
}
AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & query, ContextPtr context, bool check_password_rules)
AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & query, ContextPtr context, bool validate)
{
if (query.type && query.type == AuthenticationType::NO_PASSWORD)
return AuthenticationData();
@ -431,7 +434,7 @@ AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & que
if (!query.type && !context)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot get default password type without context");
if (check_password_rules && !context)
if (validate && !context)
throw Exception(ErrorCodes::LOGICAL_ERROR, "Cannot check password complexity rules without context");
if (query.type == AuthenticationType::BCRYPT_PASSWORD && !context)
@ -448,13 +451,13 @@ AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & que
AuthenticationData auth_data(current_type);
if (check_password_rules)
if (validate)
context->getAccessControl().checkPasswordComplexityRules(value);
if (query.type == AuthenticationType::BCRYPT_PASSWORD)
{
int workfactor = context->getAccessControl().getBcryptWorkfactor();
auth_data.setPasswordBcrypt(value, workfactor);
auth_data.setPasswordBcrypt(value, workfactor, validate);
return auth_data;
}
@ -486,7 +489,7 @@ AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & que
#endif
}
auth_data.setPassword(value);
auth_data.setPassword(value, validate);
return auth_data;
}
@ -498,13 +501,12 @@ AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & que
if (query.type == AuthenticationType::BCRYPT_PASSWORD)
{
auth_data.setPasswordHashBinary(AuthenticationData::Util::stringToDigest(value));
auth_data.setPasswordHashBinary(AuthenticationData::Util::stringToDigest(value), validate);
return auth_data;
}
else
{
auth_data.setPasswordHashHex(value);
}
auth_data.setPasswordHashHex(value, validate);
if (query.type == AuthenticationType::SHA256_PASSWORD && args_size == 2)
{

Some files were not shown because too many files have changed in this diff.