mirror of https://github.com/ClickHouse/ClickHouse.git
synced 2024-11-21 23:21:59 +00:00

commit 1f13b7b314
Merge remote-tracking branch 'origin' into vectorshine-master

@@ -16,3 +16,6 @@
 # Applied Black formatter for Python code
 e6f5a3f98b21ba99cf274a9833797889e020a2b3
+
+# Enabling clang-tidy readability-else-no-return rule
+67c1e89d90ef576e62f8b1c68269742a3c6f9b1e

.github/actionlint.yml (vendored): 1 change
@@ -4,7 +4,6 @@ self-hosted-runner:
     - func-tester
     - func-tester-aarch64
     - fuzzer-unit-tester
-    - stress-tester
     - style-checker
     - style-checker-aarch64
     - release-maker

.github/workflows/backport_branches.yml (vendored): 21 changes
@@ -229,18 +229,26 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (tsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   #############################################################################################
   ############################# INTEGRATION TESTS #############################################
   #############################################################################################
-  IntegrationTestsRelease:
-    needs: [RunConfig, BuilderDebRelease]
+  IntegrationTestsAsanOldAnalyzer:
+    needs: [RunConfig, BuilderDebAsan]
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Integration tests (release)
-      runner_type: stress-tester
+      test_name: Integration tests (asan, old analyzer)
+      runner_type: func-tester
+      data: ${{ needs.RunConfig.outputs.data }}
+  IntegrationTestsTsan:
+    needs: [RunConfig, BuilderDebTsan]
+    if: ${{ !failure() && !cancelled() }}
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: Integration tests (tsan)
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
     if: ${{ !cancelled() }}
@@ -250,7 +258,8 @@ jobs:
       - FunctionalStatelessTestAsan
       - FunctionalStatefulTestDebug
       - StressTestTsan
-      - IntegrationTestsRelease
+      - IntegrationTestsTsan
+      - IntegrationTestsAsanOldAnalyzer
       - CompatibilityCheckX86
       - CompatibilityCheckAarch64
     runs-on: [self-hosted, style-checker]

.github/workflows/release_branches.yml (vendored): 18 changes
@@ -374,7 +374,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (asan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestTsan:
     needs: [RunConfig, BuilderDebTsan]
@@ -382,7 +382,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (tsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestMsan:
     needs: [RunConfig, BuilderDebMsan]
@@ -390,7 +390,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (msan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestUBsan:
     needs: [RunConfig, BuilderDebUBsan]
@@ -398,7 +398,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (ubsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestDebug:
     needs: [RunConfig, BuilderDebDebug]
@@ -406,7 +406,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (debug)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   #############################################################################################
   ############################# INTEGRATION TESTS #############################################
@@ -417,7 +417,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (asan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsAnalyzerAsan:
     needs: [RunConfig, BuilderDebAsan]
@@ -425,7 +425,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (asan, old analyzer)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsTsan:
     needs: [RunConfig, BuilderDebTsan]
@@ -433,7 +433,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (tsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsRelease:
     needs: [RunConfig, BuilderDebRelease]
@@ -441,7 +441,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (release)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
     if: ${{ !cancelled() }}

@@ -339,7 +339,6 @@ set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3
 set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
 
 if (OS_DARWIN)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
     set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
 endif()
 
@@ -110,7 +110,6 @@ struct DecomposedFloat
     {
         if (!isNegative())
             return rhs > 0 ? -1 : 1;
-        else
         return rhs >= 0 ? -1 : 1;
     }
 
@@ -128,7 +127,6 @@ struct DecomposedFloat
 
         if (mantissa() == 0)
             return 0;
-        else
         return -1;
     }
 }
@@ -169,7 +167,6 @@ struct DecomposedFloat
         /// Float has no fractional part means that the numbers are equal.
         if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)
             return 0;
-        else
         /// Float has fractional part means its abs value is larger.
         return isNegative() ? -1 : 1;
     }
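
Note: the DecomposedFloat, JSON, and related C++ hunks in this commit are mechanical applications of clang-tidy's readability-else-no-return rule, the same rule recorded in the blame-ignore list at the top of this diff: when every branch of an if/else chain ends in a return or throw, the `else` keywords carry no information and can be dropped. A minimal before/after sketch of the pattern, using a hypothetical compare function that is not code from this commit:

    // Before: every branch returns, so each `else` is redundant.
    int compareBefore(int lhs, int rhs)
    {
        if (lhs < rhs)
            return -1;
        else if (lhs > rhs)
            return 1;
        else
            return 0;
    }

    // After readability-else-no-return: flat early returns, identical behavior.
    int compareAfter(int lhs, int rhs)
    {
        if (lhs < rhs)
            return -1;
        if (lhs > rhs)
            return 1;
        return 0;
    }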
@@ -205,7 +205,6 @@ JSON::ElementType JSON::getType() const
             Pos after_string = skipString();
             if (after_string < ptr_end && *after_string == ':')
                 return TYPE_NAME_VALUE_PAIR;
-            else
             return TYPE_STRING;
         }
         default:
@@ -474,7 +473,6 @@ JSON::Pos JSON::searchField(const char * data, size_t size) const
 
     if (it == end())
         return nullptr;
-    else
     return it->data();
 }
 
@@ -487,7 +485,7 @@ bool JSON::hasEscapes() const
 
     if (*pos == '"')
         return false;
-    else if (*pos == '\\')
+    if (*pos == '\\')
         return true;
     throw JSONException("JSON: unexpected end of data.");
 }
@@ -503,7 +501,7 @@ bool JSON::hasSpecialChars() const
 
     if (*pos == '"')
         return false;
-    else if (pos < ptr_end)
+    if (pos < ptr_end)
         return true;
     throw JSONException("JSON: unexpected end of data.");
 }
@@ -682,9 +680,8 @@ double JSON::toDouble() const
 
     if (type == TYPE_NUMBER)
         return getDouble();
-    else if (type == TYPE_STRING)
+    if (type == TYPE_STRING)
        return JSON(ptr_begin + 1, ptr_end, level + 1).getDouble();
-    else
     throw JSONException("JSON: cannot convert value to double.");
 }
 
@@ -694,9 +691,8 @@ Int64 JSON::toInt() const
 
     if (type == TYPE_NUMBER)
         return getInt();
-    else if (type == TYPE_STRING)
+    if (type == TYPE_STRING)
         return JSON(ptr_begin + 1, ptr_end, level + 1).getInt();
-    else
     throw JSONException("JSON: cannot convert value to signed integer.");
 }
 
@@ -706,9 +702,8 @@ UInt64 JSON::toUInt() const
 
     if (type == TYPE_NUMBER)
         return getUInt();
-    else if (type == TYPE_STRING)
+    if (type == TYPE_STRING)
         return JSON(ptr_begin + 1, ptr_end, level + 1).getUInt();
-    else
     throw JSONException("JSON: cannot convert value to unsigned integer.");
 }
 
@@ -718,12 +713,10 @@ std::string JSON::toString() const
 
     if (type == TYPE_STRING)
         return getString();
-    else
-    {
     Pos pos = skipElement();
     return std::string(ptr_begin, pos - ptr_begin);
-    }
 }
 
 
 JSON::iterator JSON::iterator::begin() const
@@ -203,9 +203,7 @@ T JSON::getWithDefault(const std::string & key, const T & default_) const
 
         if (key_json.isType<T>())
             return key_json.get<T>();
-        else
         return default_;
     }
-    else
     return default_;
 }
@@ -151,19 +151,19 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size)
         return unalignedLoad<uint64_t>(p1) == unalignedLoad<uint64_t>(p2)
             && unalignedLoad<uint64_t>(p1 + size - 8) == unalignedLoad<uint64_t>(p2 + size - 8);
     }
-    else if (size >= 4)
+    if (size >= 4)
     {
         /// Chunks of 4..7 bytes.
         return unalignedLoad<uint32_t>(p1) == unalignedLoad<uint32_t>(p2)
             && unalignedLoad<uint32_t>(p1 + size - 4) == unalignedLoad<uint32_t>(p2 + size - 4);
     }
-    else if (size >= 2)
+    if (size >= 2)
     {
         /// Chunks of 2..3 bytes.
         return unalignedLoad<uint16_t>(p1) == unalignedLoad<uint16_t>(p2)
             && unalignedLoad<uint16_t>(p1 + size - 2) == unalignedLoad<uint16_t>(p2 + size - 2);
     }
-    else if (size >= 1)
+    if (size >= 1)
     {
         /// A single byte.
         return *p1 == *p2;
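
Note: memequalWide handles each short-size bucket with two fixed-width loads, one anchored at the start and one at the end of the range; because the size is at least the load width and less than twice it, the loads overlap just enough to cover every byte, and since each bucket returns, the `else if` chain can become independent `if`s. A self-contained sketch of the overlapping-load trick for the 4..7-byte bucket (memcpy stands in for ClickHouse's unalignedLoad; this is not the original code):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    /// Compare 4..7 bytes by comparing the first and the last 4 bytes;
    /// for size < 8 the two 32-bit windows overlap and cover the whole range.
    bool equal4to7(const char * p1, const char * p2, size_t size)
    {
        uint32_t a1, a2, b1, b2;
        std::memcpy(&a1, p1, 4);             // bytes [0, 4)
        std::memcpy(&a2, p2, 4);
        std::memcpy(&b1, p1 + size - 4, 4);  // bytes [size - 4, size), may overlap the first load
        std::memcpy(&b2, p2 + size - 4, 4);
        return a1 == a2 && b1 == b2;
    }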
@@ -53,10 +53,9 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv,
             key = arg.substr(key_start);
             continue;
         }
-        else
-        {
         key = "";
-        }
 
         if (key_start == std::string::npos)
             continue;
@@ -330,7 +330,6 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char * end)
 #if defined(__SSE4_2__)
     if (sizeof...(symbols) >= 5)
         return find_first_symbols_sse42<positive, return_mode, sizeof...(symbols), symbols...>(begin, end);
-    else
 #endif
     return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
 }
@@ -341,7 +340,6 @@ inline const char * find_first_symbols_dispatch(const std::string_view haystack,
 #if defined(__SSE4_2__)
     if (symbols.str.size() >= 5)
         return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols);
-    else
 #endif
     return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
 }
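
Note: in find_first_symbols_dispatch the removed `else` sat between a conditionally compiled branch and the fallback call. The SSE4.2 branch returns, so falling through to the SSE2 call is equivalent, and the function now parses the same way whether or not __SSE4_2__ is defined. A reduced sketch of that dispatch shape, with hypothetical stand-in kernels (not the real search functions):

    #include <cstddef>

    const char * searchSSE42(const char * begin, const char *) { return begin; }  // stand-in kernel
    const char * searchSSE2(const char * begin, const char *) { return begin; }   // stand-in fallback

    const char * dispatch(const char * begin, const char * end, size_t num_symbols)
    {
    #if defined(__SSE4_2__)
        if (num_symbols >= 5)
            return searchSSE42(begin, end);  // early return: no `else` needed before #endif
    #endif
        return searchSSE2(begin, end);       // taken when SSE4.2 is absent or the symbol set is small
    }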
@@ -33,7 +33,6 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
         uint64_t value;
         if (setting_file >> value)
             return {value};
-        else
         return {}; /// e.g. the cgroups default "max"
     }
     current_cgroup = current_cgroup.parent_path();
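
Note: here the stream extraction doubles as the parse check. A cgroup v2 memory.max file holds either a byte count or the literal "max"; `setting_file >> value` fails on "max", which the function maps to an empty optional (no limit configured). A minimal sketch of that step, assuming a cgroup v2 style path (hypothetical helper, simplified from the original):

    #include <cstdint>
    #include <fstream>
    #include <optional>
    #include <string>

    /// Parse a cgroup v2 "memory.max" file: a number of bytes, or the word "max".
    std::optional<uint64_t> readMemoryMax(const std::string & path)
    {
        std::ifstream setting_file(path);
        uint64_t value;
        if (setting_file >> value)
            return value;
        return {};  // "max" (or an unreadable file) means no limit configured
    }

    // Usage: auto limit = readMemoryMax("/sys/fs/cgroup/memory.max");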
@@ -1420,8 +1420,6 @@ config
 configs
 conformant
 congruential
-conjuction
-conjuctive
 connectionId
 const
 contrib
@@ -1698,7 +1696,6 @@ formatReadableSize
 formatReadableTimeDelta
 formatRow
 formatRowNoNewline
-formated
 formatschema
 formatter
 formatters
@@ -3048,3 +3045,89 @@ znode
 znodes
 zookeeperSessionUptime
 zstd
+ArrowCompression
+CapnProtoEnumComparingMode
+DateTimeInputFormat
+DateTimeOutputFormat
+DateTimeOverflowBehavior
+deserialize
+dotall
+EachRow
+EscapingRule
+IdentifierQuotingRule
+IdentifierQuotingStyle
+IntervalOutputFormat
+MsgPackUUIDRepresentation
+ORCCompression
+ParquetCompression
+ParquetVersion
+SchemaInferenceMode
+alloc
+CacheWarmer
+conjuctive
+cors
+CORS
+countIf
+DefaultTableEngine
+dereference
+DistributedDDLOutputMode
+DistributedProductMode
+formatdatetime
+inequal
+INVOKER
+ITION
+JoinAlgorithm
+JoinStrictness
+keepalive
+ListObject
+ListObjects
+LoadBalancing
+LocalFSReadMethod
+LogQueriesType
+LogsLevel
+MaxThreads
+MemorySample
+multibuffer
+multiif
+multiread
+multithreading
+MySQLDataTypesSupport
+nonconst
+NonZeroUInt
+nullptr
+OverflowMode
+OverflowModeGroupBy
+ParallelReplicasMode
+param
+parsedatetime
+perf
+PerfEventInfo
+perkey
+prefetched
+prefetches
+prefetching
+preimage
+QueryCacheNondeterministicFunctionHandling
+QueryCacheSystemTableHandling
+remerge
+replcase
+rerange
+RetryStrategy
+rowlist
+SetOperationMode
+ShortCircuitFunctionEvaluation
+SQLSecurityType
+sumIf
+TCPHandler
+throwif
+TotalsMode
+TransactionsWaitCSNMode
+undelete
+unmerged
+DataPacket
+DDLs
+DistributedCacheLogMode
+DistributedCachePoolBehaviourOnLimit
+SharedJoin
+ShareSet
+unacked

@@ -48,6 +48,8 @@ if (NOT LINKER_NAME)
         find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")
     elseif (OS_DARWIN)
         find_program (LLD_PATH NAMES "ld")
+        # Duplicate libraries passed to the linker is not a problem.
+        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-no_warn_duplicate_libraries")
     endif ()
     if (LLD_PATH)
         if (OS_LINUX OR OS_DARWIN)

contrib/grpc (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e
+Subproject commit 62e871c36fa93c0af939bd31762845265214fe3d

contrib/libdivide (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit 3bd34388573681ce563348cdf04fe15d24770d04
+Subproject commit 01526031eb79375dc85e0212c966d2c514a01234

contrib/simdjson (vendored): 2 changes
@@ -1 +1 @@
-Subproject commit 6060be2fdf62edf4a8f51a8b0883d57d09397b30
+Subproject commit e341c8b43861b43de29c48ab65f292d997096953

@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.9.1.3278"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.9.1.3278"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
 
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.9.1.3278"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 #docker-official-library:off

@@ -0,0 +1 @@
+[rabbitmq_consistent_hash_exchange].

@@ -13,3 +13,5 @@ ssl_options.fail_if_no_peer_cert = false
 ssl_options.cacertfile = /etc/rabbitmq/ca-cert.pem
 ssl_options.certfile = /etc/rabbitmq/server-cert.pem
 ssl_options.keyfile = /etc/rabbitmq/server-key.pem
+
+vm_memory_high_watermark.absolute = 2GB

@@ -41,7 +41,7 @@ sidebar_label: 2022
 * Backported in [#25364](https://github.com/ClickHouse/ClickHouse/issues/25364): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * Backported in [#25387](https://github.com/ClickHouse/ClickHouse/issues/25387): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Backported in [#25455](https://github.com/ClickHouse/ClickHouse/issues/25455): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Backported in [#25406](https://github.com/ClickHouse/ClickHouse/issues/25406): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
+* Backported in [#25406](https://github.com/ClickHouse/ClickHouse/issues/25406): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
 * Backported in [#25505](https://github.com/ClickHouse/ClickHouse/issues/25505): Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
 
 #### NO CL ENTRY

@@ -40,7 +40,7 @@ sidebar_label: 2022
 * Backported in [#25362](https://github.com/ClickHouse/ClickHouse/issues/25362): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * Backported in [#25386](https://github.com/ClickHouse/ClickHouse/issues/25386): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Backported in [#25456](https://github.com/ClickHouse/ClickHouse/issues/25456): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Backported in [#25408](https://github.com/ClickHouse/ClickHouse/issues/25408): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
+* Backported in [#25408](https://github.com/ClickHouse/ClickHouse/issues/25408): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
 * Backported in [#25504](https://github.com/ClickHouse/ClickHouse/issues/25504): Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
 
 #### NO CL ENTRY

@@ -24,7 +24,7 @@ sidebar_label: 2022
 * Backported in [#25363](https://github.com/ClickHouse/ClickHouse/issues/25363): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * Backported in [#25388](https://github.com/ClickHouse/ClickHouse/issues/25388): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Backported in [#25448](https://github.com/ClickHouse/ClickHouse/issues/25448): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Backported in [#25407](https://github.com/ClickHouse/ClickHouse/issues/25407): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
+* Backported in [#25407](https://github.com/ClickHouse/ClickHouse/issues/25407): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
 
 #### NOT FOR CHANGELOG / INSIGNIFICANT
 
@@ -133,7 +133,7 @@ sidebar_label: 2022
 * On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
+* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
 * Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
 * Fix excessive underscore before the names of the preprocessed configuration files. [#25431](https://github.com/ClickHouse/ClickHouse/pull/25431) ([Vitaly Baranov](https://github.com/vitlibar)).
 * Fix convertion of datetime with timezone for MySQL, PostgreSQL, ODBC. Closes [#5057](https://github.com/ClickHouse/ClickHouse/issues/5057). [#25528](https://github.com/ClickHouse/ClickHouse/pull/25528) ([Kseniia Sumarokova](https://github.com/kssenii)).

docs/changelogs/v24.8.5.115-lts.md (new file): 73 additions
@@ -0,0 +1,73 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.8.5.115-lts (8c4cb00a384) FIXME as compared to v24.8.4.13-lts (53195bc189b)
+
+#### Improvement
+* Backported in [#70046](https://github.com/ClickHouse/ClickHouse/issues/70046): Add new column readonly_duration to the system.replicas table. Needed to be able to distinguish actual readonly replicas from sentinel ones in alerts. [#69871](https://github.com/ClickHouse/ClickHouse/pull/69871) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#69786](https://github.com/ClickHouse/ClickHouse/issues/69786): Fix attaching table when pg dbname contains "-" in MaterializedPostgreSQL. [#62730](https://github.com/ClickHouse/ClickHouse/pull/62730) ([takakawa](https://github.com/takakawa)).
+* Backported in [#70318](https://github.com/ClickHouse/ClickHouse/issues/70318): Fixed error on generated columns in MaterializedPostgreSQL when adnum ordering is broken [#63161](https://github.com/ClickHouse/ClickHouse/issues/63161). Fixed error on id column with nextval expression as default MaterializedPostgreSQL when there are generated columns in table. Fixed error on dropping publication with symbols except [a-z1-9-]. [#67664](https://github.com/ClickHouse/ClickHouse/pull/67664) ([Kruglov Kirill](https://github.com/1on)).
+* Backported in [#69467](https://github.com/ClickHouse/ClickHouse/issues/69467): Fix expiration in `RoleCache`. [#67748](https://github.com/ClickHouse/ClickHouse/pull/67748) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#69735](https://github.com/ClickHouse/ClickHouse/issues/69735): Fix crash in `lag`/`lead` which is introduced in [#67091](https://github.com/ClickHouse/ClickHouse/issues/67091). [#68262](https://github.com/ClickHouse/ClickHouse/pull/68262) ([lgbo](https://github.com/lgbo-ustc)).
+* Backported in [#69444](https://github.com/ClickHouse/ClickHouse/issues/69444): After unexpected restart, fail to start replication of ReplicatedMergeTree due to abnormal handling of covered-by-broken part. [#68584](https://github.com/ClickHouse/ClickHouse/pull/68584) ([baolin](https://github.com/baolinhuang)).
+* Backported in [#69810](https://github.com/ClickHouse/ClickHouse/issues/69810): Make `ColumnsDescription::toString` format each column using the same `IAST::FormatState object`. This results in uniform columns metadata being written to disk and ZooKeeper. [#68733](https://github.com/ClickHouse/ClickHouse/pull/68733) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+* Backported in [#69757](https://github.com/ClickHouse/ClickHouse/issues/69757): Fix incorrect results of Fix uniq and GROUP BY for JSON/Dynamic types. [#69203](https://github.com/ClickHouse/ClickHouse/pull/69203) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70195](https://github.com/ClickHouse/ClickHouse/issues/70195): Fix insertion of incomplete type into Dynamic during deserialization. It could lead to `Parameter out of bound` errors. [#69291](https://github.com/ClickHouse/ClickHouse/pull/69291) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#69398](https://github.com/ClickHouse/ClickHouse/issues/69398): Mark Dynamic type as not safe primary key type to avoid issues with Fields. [#69311](https://github.com/ClickHouse/ClickHouse/pull/69311) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#69704](https://github.com/ClickHouse/ClickHouse/issues/69704): Improve restoring of access entities' dependencies. [#69346](https://github.com/ClickHouse/ClickHouse/pull/69346) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#69459](https://github.com/ClickHouse/ClickHouse/issues/69459): Fix undefined behavior when all connection attempts fail getting a connection for insertions. [#69390](https://github.com/ClickHouse/ClickHouse/pull/69390) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#69503](https://github.com/ClickHouse/ClickHouse/issues/69503): Fixed a `LOGICAL_ERROR` with function `sqidDecode` ([#69450](https://github.com/ClickHouse/ClickHouse/issues/69450)). [#69451](https://github.com/ClickHouse/ClickHouse/pull/69451) ([Robert Schulze](https://github.com/rschu1ze)).
+* Backported in [#69480](https://github.com/ClickHouse/ClickHouse/issues/69480): Quick fix for s3queue problem on 24.6 or create query with database replicated. [#69454](https://github.com/ClickHouse/ClickHouse/pull/69454) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#69535](https://github.com/ClickHouse/ClickHouse/issues/69535): Fixed case when memory consumption was too high because of the squashing in `INSERT INTO ... SELECT` or `CREATE TABLE AS SELECT` queries. [#69469](https://github.com/ClickHouse/ClickHouse/pull/69469) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Backported in [#69696](https://github.com/ClickHouse/ClickHouse/issues/69696): Keep original order of conditions during move to prewhere. Previously the order could change and it could lead to failing queries when the order is important. [#69560](https://github.com/ClickHouse/ClickHouse/pull/69560) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70439](https://github.com/ClickHouse/ClickHouse/issues/70439): Fix vrash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#69666](https://github.com/ClickHouse/ClickHouse/issues/69666): Fix Keeper multi-request preprocessing after ZNOAUTH error. [#69627](https://github.com/ClickHouse/ClickHouse/pull/69627) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#70191](https://github.com/ClickHouse/ClickHouse/issues/70191): Fix crash when executing `create view t as (with recursive 42 as ttt select ttt);`. [#69676](https://github.com/ClickHouse/ClickHouse/pull/69676) ([Han Fei](https://github.com/hanfei1991)).
+* Backported in [#69798](https://github.com/ClickHouse/ClickHouse/issues/69798): Make getHyperrectangleForRowGroup not throw an exception when the data type in parquet file is not convertable into the requested data type. Solved the user's problem when the Parquet file had Decimal64 data type and the column data type was DateTime. [#69745](https://github.com/ClickHouse/ClickHouse/pull/69745) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+* Backported in [#70410](https://github.com/ClickHouse/ClickHouse/issues/70410): Fixed `maxMapState` throwing 'Bad get' if value type is DateTime64. [#69787](https://github.com/ClickHouse/ClickHouse/pull/69787) ([Michael Kolupaev](https://github.com/al13n321)).
+* Backported in [#70019](https://github.com/ClickHouse/ClickHouse/issues/70019): Fix analyzer default with old compatibility value. [#69895](https://github.com/ClickHouse/ClickHouse/pull/69895) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#69941](https://github.com/ClickHouse/ClickHouse/issues/69941): Don't check dependencies during CREATE OR REPLACE VIEW during DROP of old table. Previously CREATE OR REPLACE query failed when there are dependent tables of the recreated view. [#69907](https://github.com/ClickHouse/ClickHouse/pull/69907) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70001](https://github.com/ClickHouse/ClickHouse/issues/70001): Now SQL security will work with parameterized views correctly. [#69984](https://github.com/ClickHouse/ClickHouse/pull/69984) ([pufit](https://github.com/pufit)).
+* Backported in [#70081](https://github.com/ClickHouse/ClickHouse/issues/70081): Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
+* Backported in [#70068](https://github.com/ClickHouse/ClickHouse/issues/70068): Fixes `Block structure mismatch` for queries with nested views and `WHERE` condition. Fixes [#66209](https://github.com/ClickHouse/ClickHouse/issues/66209). [#70054](https://github.com/ClickHouse/ClickHouse/pull/70054) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#70166](https://github.com/ClickHouse/ClickHouse/issues/70166): Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#70236](https://github.com/ClickHouse/ClickHouse/issues/70236): Check for Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70203](https://github.com/ClickHouse/ClickHouse/issues/70203): Fix wrong result with skipping index. [#70127](https://github.com/ClickHouse/ClickHouse/pull/70127) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#70183](https://github.com/ClickHouse/ClickHouse/issues/70183): Fix data race in ColumnObject/ColumnTuple decompress method that could lead to heap use after free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70251](https://github.com/ClickHouse/ClickHouse/issues/70251): Fix possible hung in ALTER COLUMN with Dynamic type. [#70144](https://github.com/ClickHouse/ClickHouse/pull/70144) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70228](https://github.com/ClickHouse/ClickHouse/issues/70228): Use correct `max_types` parameter during Dynamic type creation for JSON subcolumn. [#70147](https://github.com/ClickHouse/ClickHouse/pull/70147) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70243](https://github.com/ClickHouse/ClickHouse/issues/70243): Fix the password being displayed in `system.query_log` for users with bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Backported in [#70432](https://github.com/ClickHouse/ClickHouse/issues/70432): Fix possible crash in JSON column. [#70172](https://github.com/ClickHouse/ClickHouse/pull/70172) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70307](https://github.com/ClickHouse/ClickHouse/issues/70307): Fix multiple issues with arrayMin and arrayMax. [#70207](https://github.com/ClickHouse/ClickHouse/pull/70207) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#70274](https://github.com/ClickHouse/ClickHouse/issues/70274): Respect setting allow_simdjson in JSON type parser. [#70218](https://github.com/ClickHouse/ClickHouse/pull/70218) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70345](https://github.com/ClickHouse/ClickHouse/issues/70345): Don't modify global settings with startup scripts. Previously, changing a setting in a startup script would change it globally. [#70310](https://github.com/ClickHouse/ClickHouse/pull/70310) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#70426](https://github.com/ClickHouse/ClickHouse/issues/70426): Fix ALTER of Dynamic type with reducing max_types parameter that could lead to server crash. [#70328](https://github.com/ClickHouse/ClickHouse/pull/70328) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70371](https://github.com/ClickHouse/ClickHouse/issues/70371): Fix crash when using WITH FILL incorrectly. [#70338](https://github.com/ClickHouse/ClickHouse/pull/70338) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### NO CL ENTRY
+
+* NO CL ENTRY: 'Revert "Backport [#70146](https://github.com/ClickHouse/ClickHouse/issues/70146) to 24.8: Upgrade integration-runner image"'. [#70324](https://github.com/ClickHouse/ClickHouse/pull/70324) ([Max K.](https://github.com/maxknv)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#69961](https://github.com/ClickHouse/ClickHouse/issues/69961): Output an operation error for ZK Multi request failed operation into log. [#68127](https://github.com/ClickHouse/ClickHouse/pull/68127) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Backported in [#69491](https://github.com/ClickHouse/ClickHouse/issues/69491): Fix test_role & test_keeper_s3_snapshot integration tests. [#69013](https://github.com/ClickHouse/ClickHouse/pull/69013) ([Shankar](https://github.com/shiyer7474)).
+* Backported in [#69953](https://github.com/ClickHouse/ClickHouse/issues/69953): Remove stale moving parts without zookeeper. [#69075](https://github.com/ClickHouse/ClickHouse/pull/69075) ([Kirill](https://github.com/kirillgarbar)).
+* Backported in [#69353](https://github.com/ClickHouse/ClickHouse/issues/69353): Fix: Not-ready Set with parallel replicas. [#69264](https://github.com/ClickHouse/ClickHouse/pull/69264) ([Igor Nikonov](https://github.com/devcrafter)).
+* Backported in [#69427](https://github.com/ClickHouse/ClickHouse/issues/69427): Fix 24.8 setting compatibility `rows_before_aggregation`. [#69394](https://github.com/ClickHouse/ClickHouse/pull/69394) ([Nikita Fomichev](https://github.com/fm4v)).
+* Backported in [#69689](https://github.com/ClickHouse/ClickHouse/issues/69689): Add function `kill_ci_runner`. Kill runner when pre-pull failed. [#69557](https://github.com/ClickHouse/ClickHouse/pull/69557) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#69639](https://github.com/ClickHouse/ClickHouse/issues/69639): Add more contexts to the debug action and use it broadly. [#69599](https://github.com/ClickHouse/ClickHouse/pull/69599) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#69721](https://github.com/ClickHouse/ClickHouse/issues/69721): Prohibit `ALTER TABLE ... ADD INDEX ... TYPE` inverted if setting = 0. [#69684](https://github.com/ClickHouse/ClickHouse/pull/69684) ([Robert Schulze](https://github.com/rschu1ze)).
+* Backported in [#69972](https://github.com/ClickHouse/ClickHouse/issues/69972): S3Queue: support having deprecated settings to not fail server startup. [#69769](https://github.com/ClickHouse/ClickHouse/pull/69769) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#70283](https://github.com/ClickHouse/ClickHouse/issues/70283): Improve pipdeptree generator for docker images. - Update requirements.txt for the integration tests runner container - Remove some small dependencies, improve `helpers/retry_decorator.py` - Upgrade docker-compose from EOL version 1 to version 2. [#70146](https://github.com/ClickHouse/ClickHouse/pull/70146) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#70260](https://github.com/ClickHouse/ClickHouse/issues/70260): Update test_storage_s3_queue/test.py. [#70159](https://github.com/ClickHouse/ClickHouse/pull/70159) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#70314](https://github.com/ClickHouse/ClickHouse/issues/70314): CI: Remove await feature from release branches. [#70294](https://github.com/ClickHouse/ClickHouse/pull/70294) ([Max K.](https://github.com/maxknv)).
+* Backported in [#70380](https://github.com/ClickHouse/ClickHouse/issues/70380): Fix tiny mistake, responsible for some of kafka test flaps. Example [report](https://s3.amazonaws.com/clickhouse-test-reports/0/3198aafac59c368993e7b5f49d95674cc1b1be18/integration_tests__release__[2_4].html). [#70352](https://github.com/ClickHouse/ClickHouse/pull/70352) ([filimonov](https://github.com/filimonov)).
+* Backported in [#70405](https://github.com/ClickHouse/ClickHouse/issues/70405): Closes [#69634](https://github.com/ClickHouse/ClickHouse/issues/69634). [#70354](https://github.com/ClickHouse/ClickHouse/pull/70354) ([pufit](https://github.com/pufit)).

33
docs/changelogs/v24.9.2.42-stable.md
Normal file
33
docs/changelogs/v24.9.2.42-stable.md
Normal file
@ -0,0 +1,33 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.9.2.42-stable (de7c791a2ea) FIXME as compared to v24.9.1.3278-stable (6d058d82a8e)
+
+#### Improvement
+* Backported in [#70091](https://github.com/ClickHouse/ClickHouse/issues/70091): Add `show_create_query_identifier_quoting_rule` to define identifier quoting behavior of the show create query result. Possible values: - `user_display`: When the identifier is a keyword. - `when_necessary`: When the identifier is one of `{"distinct", "all", "table"}`, or it can cause ambiguity: column names, dictionary attribute names. - `always`: Always quote identifiers. [#69448](https://github.com/ClickHouse/ClickHouse/pull/69448) ([tuanpach](https://github.com/tuanpach)).
+* Backported in [#70100](https://github.com/ClickHouse/ClickHouse/issues/70100): Follow-up to https://github.com/ClickHouse/ClickHouse/pull/69346. Point 4 described there will now work as well. [#69563](https://github.com/ClickHouse/ClickHouse/pull/69563) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#70048](https://github.com/ClickHouse/ClickHouse/issues/70048): Add new column `readonly_duration` to the `system.replicas` table. Needed to be able to distinguish actual readonly replicas from sentinel ones in alerts. [#69871](https://github.com/ClickHouse/ClickHouse/pull/69871) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#70193](https://github.com/ClickHouse/ClickHouse/issues/70193): Fix crash when executing `create view t as (with recursive 42 as ttt select ttt);`. [#69676](https://github.com/ClickHouse/ClickHouse/pull/69676) ([Han Fei](https://github.com/hanfei1991)).
+* Backported in [#70083](https://github.com/ClickHouse/ClickHouse/issues/70083): Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
+* Backported in [#70070](https://github.com/ClickHouse/ClickHouse/issues/70070): Fixes `Block structure mismatch` for queries with nested views and `WHERE` condition. Fixes [#66209](https://github.com/ClickHouse/ClickHouse/issues/66209). [#70054](https://github.com/ClickHouse/ClickHouse/pull/70054) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#70168](https://github.com/ClickHouse/ClickHouse/issues/70168): Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#70238](https://github.com/ClickHouse/ClickHouse/issues/70238): Check for Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70205](https://github.com/ClickHouse/ClickHouse/issues/70205): Fix wrong result with skipping index. [#70127](https://github.com/ClickHouse/ClickHouse/pull/70127) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#70185](https://github.com/ClickHouse/ClickHouse/issues/70185): Fix data race in ColumnObject/ColumnTuple decompress method that could lead to heap use after free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70253](https://github.com/ClickHouse/ClickHouse/issues/70253): Fix possible hang in ALTER COLUMN with Dynamic type. [#70144](https://github.com/ClickHouse/ClickHouse/pull/70144) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70230](https://github.com/ClickHouse/ClickHouse/issues/70230): Use correct `max_types` parameter during Dynamic type creation for JSON subcolumn. [#70147](https://github.com/ClickHouse/ClickHouse/pull/70147) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70217](https://github.com/ClickHouse/ClickHouse/issues/70217): Fix the password being displayed in `system.query_log` for users with bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Backported in [#70267](https://github.com/ClickHouse/ClickHouse/issues/70267): Respect setting allow_simdjson in JSON type parser. [#70218](https://github.com/ClickHouse/ClickHouse/pull/70218) ([Pavel Kruglov](https://github.com/Avogar)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+* Backported in [#70052](https://github.com/ClickHouse/ClickHouse/issues/70052): Improve stateless test runner. [#69864](https://github.com/ClickHouse/ClickHouse/pull/69864) ([Alexey Katsman](https://github.com/alexkats)).
+* Backported in [#70284](https://github.com/ClickHouse/ClickHouse/issues/70284): Improve pipdeptree generator for docker images. - Update requirements.txt for the integration tests runner container - Remove some small dependencies, improve `helpers/retry_decorator.py` - Upgrade docker-compose from EOL version 1 to version 2. [#70146](https://github.com/ClickHouse/ClickHouse/pull/70146) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#70261](https://github.com/ClickHouse/ClickHouse/issues/70261): Update test_storage_s3_queue/test.py. [#70159](https://github.com/ClickHouse/ClickHouse/pull/70159) ([Kseniia Sumarokova](https://github.com/kssenii)).
@@ -195,6 +195,9 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
 - `--print-profile-events` – Print `ProfileEvents` packets.
 - `--profile-events-delay-ms` – Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet).
 - `--jwt` – If specified, enables authorization via JSON Web Token. Server JWT authorization is available only in ClickHouse Cloud.
+- `--progress` – Print progress of query execution. Possible values: 'tty|on|1|true|yes' - outputs to TTY in interactive mode; 'err' - outputs to STDERR in non-interactive mode; 'off|0|false|no' - disables the progress printing. Default: TTY in interactive mode, disabled in non-interactive.
+- `--progress-table` – Print a progress table with changing metrics during query execution. Possible values: 'tty|on|1|true|yes' - outputs to TTY in interactive mode; 'err' - outputs to STDERR in non-interactive mode; 'off|0|false|no' - disables the progress table. Default: TTY in interactive mode, disabled in non-interactive.
+- `--enable-progress-table-toggle` – Enable toggling of the progress table by pressing the control key (Space). Only applicable in interactive mode with the progress table printing enabled. Default: 'true'.
 
 Instead of `--host`, `--port`, `--user` and `--password` options, ClickHouse client also supports connection strings (see next section).
@@ -1057,12 +1057,12 @@ Default value: throw
 
 ## deduplicate_merge_projection_mode
 
-Whether to allow create projection for the table with non-classic MergeTree, that is not (Replicated, Shared) MergeTree. If allowed, what is the action when merge projections, either drop or rebuild. So classic MergeTree would ignore this setting.
+Whether to allow create projection for the table with non-classic MergeTree, that is not (Replicated, Shared) MergeTree. Ignore option is purely for compatibility which might result in incorrect answer. Otherwise, if allowed, what is the action when merge projections, either drop or rebuild. So classic MergeTree would ignore this setting.
 It also controls `OPTIMIZE DEDUPLICATE` as well, but has effect on all MergeTree family members. Similar to the option `lightweight_mutation_projection_mode`, it is also part level.
 
 Possible values:
 
-- throw, drop, rebuild
+- ignore, throw, drop, rebuild
 
 Default value: throw
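For illustration, a minimal sketch of how the non-`throw` modes could be exercised (the table and column names below are hypothetical, not taken from the docs):

```sql
-- With the default deduplicate_merge_projection_mode = 'throw', creating a
-- projection on a ReplacingMergeTree table is rejected; choosing 'drop' lets
-- the table be created and drops projection parts when merges deduplicate rows.
CREATE TABLE kv
(
    k UInt64,
    v UInt64,
    PROJECTION by_v (SELECT v, count() GROUP BY v)
)
ENGINE = ReplacingMergeTree
ORDER BY k
SETTINGS deduplicate_merge_projection_mode = 'drop';
```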
@@ -6,7 +6,7 @@ sidebar_label: User Settings
 
 # Users and Roles Settings
 
-The `users` section of the `user.xml` configuration file contains user settings.
+The `users` section of the `users.xml` configuration file contains user settings.
 
 :::note
 ClickHouse also supports [SQL-driven workflow](../../guides/sre/user-management/index.md#access-control) for managing users. We recommend using it.
@@ -10,21 +10,21 @@ Columns:
 
 - `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.
 - `view` ([String](../../sql-reference/data-types/string.md)) — Table name.
+- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Table uuid (Atomic database).
 - `status` ([String](../../sql-reference/data-types/string.md)) — Current state of the refresh.
-- `last_refresh_result` ([String](../../sql-reference/data-types/string.md)) — Outcome of the latest refresh attempt.
-- `last_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the last refresh attempt. `NULL` if no refresh attempts happened since server startup or table creation.
-- `last_success_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the last successful refresh. `NULL` if no successful refreshes happened since server startup or table creation.
-- `duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How long the last refresh attempt took.
-- `next_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time at which the next refresh is scheduled to start.
-- `remaining_dependencies` ([Array(String)](../../sql-reference/data-types/array.md)) — If the view has [refresh dependencies](../../sql-reference/statements/create/view.md#refresh-dependencies), this array contains the subset of those dependencies that are not satisfied for the current refresh yet. If `status = 'WaitingForDependencies'`, a refresh is ready to start as soon as these dependencies are fulfilled.
-- `exception` ([String](../../sql-reference/data-types/string.md)) — if `last_refresh_result = 'Error'`, i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace.
-- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — If nonzero, the current or next refresh is a retry (see `refresh_retries` refresh setting), and `retry` is the 1-based index of that retry.
-- `refresh_count` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of successful refreshes since last server restart or table creation.
-- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1.
-- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far.
-- `total_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Estimated total number of rows that need to be read by the current refresh.
-
-(There are additional columns related to current refresh progress, but they are currently unreliable.)
+- `last_success_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time when the latest successful refresh started. NULL if no successful refreshes happened since server startup or table creation.
+- `last_success_duration_ms` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — How long the latest refresh took.
+- `last_refresh_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time when the latest refresh attempt finished (if known) or started (if unknown or still running). NULL if no refresh attempts happened since server startup or table creation.
+- `last_refresh_replica` ([String](../../sql-reference/data-types/string.md)) — If coordination is enabled, name of the replica that made the current (if running) or previous (if not running) refresh attempt.
+- `next_refresh_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time at which the next refresh is scheduled to start, if status = Scheduled.
+- `exception` ([String](../../sql-reference/data-types/string.md)) — Error message from previous attempt if it failed.
+- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How many failed attempts there were so far, for the current refresh.
+- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1. Not available if status is `RunningOnAnotherReplica`.
+- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far. Not available if status is `RunningOnAnotherReplica`.
+- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of bytes read during the current refresh. Not available if status is `RunningOnAnotherReplica`.
+- `total_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Estimated total number of rows that need to be read by the current refresh. Not available if status is `RunningOnAnotherReplica`.
+- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows written during the current refresh. Not available if status is `RunningOnAnotherReplica`.
+- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of bytes written during the current refresh. Not available if status is `RunningOnAnotherReplica`.
 
 **Example**
 
@@ -226,9 +226,9 @@ Result:
 
 ## bitTestAll
 
-Returns result of [logical conjuction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. Counting is right-to-left, starting at 0.
+Returns result of [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. Counting is right-to-left, starting at 0.
 
-The conjuction for bit-wise operations:
+The conjunction for bit-wise operations:
 
 0 AND 0 = 0
 
@@ -251,7 +251,7 @@ SELECT bitTestAll(number, index1, index2, index3, index4, ...)
 
 **Returned value**
 
-- Result of the logical conjuction. [UInt8](../data-types/int-uint.md).
+- Result of the logical conjunction. [UInt8](../data-types/int-uint.md).
 
 **Example**
@@ -316,6 +316,38 @@ Result:
 
 Same as `toIPv4`, but if the IPv4 address has an invalid format, it returns null.
 
+**Syntax**
+
+```sql
+toIPv4OrNull(value)
+```
+
+**Arguments**
+
+- `value` — The value with IPv4 address.
+
+**Returned value**
+
+- `value` converted to an IPv4 address, or `NULL` if the input has an invalid format. [String](../data-types/string.md).
+
+**Example**
+
+Query:
+
+```sql
+SELECT
+    toIPv4OrNull('192.168.0.1') AS s1,
+    toIPv4OrNull('192.168.0') AS s2
+```
+
+Result:
+
+```response
+┌─s1──────────┬─s2───┐
+│ 192.168.0.1 │ ᴺᵁᴸᴸ │
+└─────────────┴──────┘
+```
+
 ## toIPv6OrDefault(string)
 
 Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns `::` (0 IPv6).
@@ -135,15 +135,15 @@ To change SQL security for an existing view, use
 ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
 ```
 
-### Examples sql security
+### Examples
 ```sql
-CREATE test_view
+CREATE VIEW test_view
 DEFINER = alice SQL SECURITY DEFINER
 AS SELECT ...
 ```
 
 ```sql
-CREATE test_view
+CREATE VIEW test_view
 SQL SECURITY INVOKER
 AS SELECT ...
 ```
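As a usage sketch of the `ALTER` form shown above (reusing the placeholder view name `test_view` from the examples):

```sql
-- Switch an existing view to INVOKER semantics: queries against the view
-- are then checked against the privileges of the querying user.
ALTER TABLE test_view MODIFY SQL SECURITY INVOKER;
```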
@@ -184,14 +184,6 @@ Differences from regular non-refreshable materialized views:
 The settings in the `REFRESH ... SETTINGS` part of the query are refresh settings (e.g. `refresh_retries`), distinct from regular settings (e.g. `max_threads`). Regular settings can be specified using `SETTINGS` at the end of the query.
 :::
 
-:::note
-Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations:
-* not compatible with Replicated database or table engines
-* It is not supported in ClickHouse Cloud
-* require [Atomic database engine](../../../engines/database-engines/atomic.md),
-* no limit on number of concurrent refreshes.
-:::
-
 ### Refresh Schedule
 
 Example refresh schedules:
@@ -203,6 +195,10 @@ REFRESH EVERY 2 WEEK OFFSET 5 DAY 15 HOUR 10 MINUTE -- every other Saturday, at
 REFRESH EVERY 30 MINUTE -- at 00:00, 00:30, 01:00, 01:30, etc
 REFRESH AFTER 30 MINUTE -- 30 minutes after the previous refresh completes, no alignment with time of day
 -- REFRESH AFTER 1 HOUR OFFSET 1 MINUTE -- syntax error, OFFSET is not allowed with AFTER
+REFRESH EVERY 1 WEEK 2 DAYS -- every 9 days, not on any particular day of the week or month;
+                            -- specifically, when day number (since 1969-12-29) is divisible by 9
+REFRESH EVERY 5 MONTHS -- every 5 months, different months each year (as 12 is not divisible by 5);
+                       -- specifically, when month number (since 1970-01) is divisible by 5
 ```
 `RANDOMIZE FOR` randomly adjusts the time of each refresh, e.g.:
|
|||||||
|
|
||||||
Additionally, a refresh is started immediately after the materialized view is created, unless `EMPTY` is specified in the `CREATE` query. If `EMPTY` is specified, the first refresh happens according to schedule.
|
Additionally, a refresh is started immediately after the materialized view is created, unless `EMPTY` is specified in the `CREATE` query. If `EMPTY` is specified, the first refresh happens according to schedule.
|
||||||
|
|
||||||
|
### In Replicated DB
|
||||||
|
|
||||||
|
If the refreshable materialized view is in a [Replicated database](../../../engines/database-engines/replicated.md), the replicas coordinate with each other such that only one replica performs the refresh at each scheduled time. [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) table engine is required, so that all replicas see the data produced by the refresh.
|
||||||
|
|
||||||
|
In `APPEND` mode, coordination can be disabled using `SETTINGS all_replicas = 1`. This makes replicas do refreshes independently of each other. In this case ReplicatedMergeTree is not required.
|
||||||
|
|
||||||
|
In non-`APPEND` mode, only coordinated refreshing is supported. For uncoordinated, use `Atomic` database and `CREATE ... ON CLUSTER` query to create refreshable materialized views on all replicas.
|
||||||
|
|
||||||
|
The coordination is done through Keeper. The znode path is determined by [default_replica_path](../../../operations/server-configuration-parameters/settings.md#default_replica_path) server setting.
|
||||||
|
|
||||||
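A minimal sketch of the uncoordinated `APPEND` variant described above (the view and table names are hypothetical, and the exact clause order may differ between versions):

```sql
-- Each replica refreshes independently because coordination is disabled;
-- all_replicas = 1 is a refresh setting, so it goes in the REFRESH clause.
CREATE MATERIALIZED VIEW events_hourly
REFRESH EVERY 1 HOUR SETTINGS all_replicas = 1 APPEND
ENGINE = MergeTree ORDER BY hour
AS SELECT toStartOfHour(now()) AS hour, count() AS c
FROM events GROUP BY hour;
```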
 ### Dependencies {#refresh-dependencies}
 
 `DEPENDS ON` synchronizes refreshes of different tables. By way of example, suppose there's a chain of two refreshable materialized views (a sketch of such a chain follows below):
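The chain itself falls outside this hunk; a minimal hypothetical sketch of such a pair could look like this, with `destination` waiting for each refresh of `source`:

```sql
-- `destination` refreshes only after the corresponding refresh of `source`
-- completes, so it never reads a half-refreshed `source`.
CREATE MATERIALIZED VIEW source REFRESH EVERY 1 DAY AS SELECT ...;
CREATE MATERIALIZED VIEW destination REFRESH EVERY 1 DAY DEPENDS ON source AS SELECT ... FROM source;
```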
@@ -277,6 +283,8 @@ The status of all refreshable materialized views is available in table [`system.
 
 To manually stop, start, trigger, or cancel refreshes use [`SYSTEM STOP|START|REFRESH|CANCEL VIEW`](../system.md#refreshable-materialized-views).
 
+To wait for a refresh to complete, use [`SYSTEM WAIT VIEW`](../system.md#refreshable-materialized-views). In particular, it is useful for waiting for the initial refresh after creating a view.
+
 :::note
 Fun fact: the refresh query is allowed to read from the view that's being refreshed, seeing pre-refresh version of the data. This means you can implement Conway's game of life: https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA==
 :::
@@ -233,15 +233,20 @@ Hierarchy of privileges:
     - `addressToSymbol`
     - `demangle`
 - [SOURCES](#sources)
+    - `AZURE`
     - `FILE`
-    - `URL`
-    - `REMOTE`
-    - `YSQL`
-    - `ODBC`
-    - `JDBC`
     - `HDFS`
-    - `S3`
+    - `HIVE`
+    - `JDBC`
+    - `MONGO`
+    - `MYSQL`
+    - `ODBC`
     - `POSTGRES`
+    - `REDIS`
+    - `REMOTE`
+    - `S3`
+    - `SQLITE`
+    - `URL`
 - [dictGet](#dictget)
 - [displaySecretsInShowAndSelect](#displaysecretsinshowandselect)
 - [NAMED COLLECTION ADMIN](#named-collection-admin)
@@ -510,15 +515,20 @@ Allows using [introspection](../../operations/optimizing-performance/sampling-qu
 Allows using external data sources. Applies to [table engines](../../engines/table-engines/index.md) and [table functions](../../sql-reference/table-functions/index.md#table-functions).
 
 - `SOURCES`. Level: `GROUP`
+    - `AZURE`. Level: `GLOBAL`
     - `FILE`. Level: `GLOBAL`
-    - `URL`. Level: `GLOBAL`
-    - `REMOTE`. Level: `GLOBAL`
-    - `YSQL`. Level: `GLOBAL`
-    - `ODBC`. Level: `GLOBAL`
-    - `JDBC`. Level: `GLOBAL`
     - `HDFS`. Level: `GLOBAL`
-    - `S3`. Level: `GLOBAL`
+    - `HIVE`. Level: `GLOBAL`
+    - `JDBC`. Level: `GLOBAL`
+    - `MONGO`. Level: `GLOBAL`
+    - `MYSQL`. Level: `GLOBAL`
+    - `ODBC`. Level: `GLOBAL`
     - `POSTGRES`. Level: `GLOBAL`
+    - `REDIS`. Level: `GLOBAL`
+    - `REMOTE`. Level: `GLOBAL`
+    - `S3`. Level: `GLOBAL`
+    - `SQLITE`. Level: `GLOBAL`
+    - `URL`. Level: `GLOBAL`
 
 The `SOURCES` privilege enables use of all the sources. Also you can grant a privilege for each source individually. To use sources, you need additional privileges.
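To make the individual-grant path concrete, a small sketch (the user name `analyst` is hypothetical):

```sql
-- Grant only the S3 source instead of the whole SOURCES group; the user
-- still needs the usual table-level privileges (e.g. CREATE TABLE, INSERT).
GRANT S3 ON *.* TO analyst;
```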
@@ -29,7 +29,7 @@ The condition could be any expression based on your requirements.
 Here is a simple example that intersects the numbers 1 to 10 with the numbers 3 to 8:
 
 ```sql
-SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,6);
+SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,8);
 ```
 
 Result:
@@ -351,11 +351,15 @@ Shows privileges for a user.
 **Syntax**
 
 ``` sql
-SHOW GRANTS [FOR user1 [, user2 ...]]
+SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL]
 ```
 
 If user is not specified, the query returns privileges for the current user.
 
+The `WITH IMPLICIT` modifier allows showing the implicit grants (e.g., `GRANT SELECT ON system.one`).
+
+The `FINAL` modifier merges all grants from the user and its granted roles (with inheritance).
+
 ## SHOW CREATE USER
 
 Shows parameters that were used at a [user creation](../../sql-reference/statements/create/user.md).
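Putting both modifiers together, a usage sketch (the user name `alice` is hypothetical):

```sql
-- One flat list: explicit grants, implicit grants, and everything inherited
-- from alice's roles.
SHOW GRANTS FOR alice WITH IMPLICIT FINAL
```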
@@ -565,3 +565,13 @@ If there's a refresh in progress for the given view, interrupt and cancel it. Ot
 ```sql
 SYSTEM CANCEL VIEW [db.]name
 ```
+
+### SYSTEM WAIT VIEW
+
+Waits for the running refresh to complete. If no refresh is running, returns immediately. If the latest refresh attempt failed, reports an error.
+
+Can be used right after creating a new refreshable materialized view (without the EMPTY keyword) to wait for the initial refresh to complete.
+
+```sql
+SYSTEM WAIT VIEW [db.]name
+```
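A usage sketch tying this to view creation (the view and table names are hypothetical):

```sql
-- Create the view (the initial refresh starts immediately since EMPTY is not
-- used), then block until that first refresh finishes.
CREATE MATERIALIZED VIEW db.daily_totals REFRESH EVERY 1 DAY
ENGINE = MergeTree ORDER BY tuple()
AS SELECT today() AS d, count() AS c FROM db.events;
SYSTEM WAIT VIEW db.daily_totals;
```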
@@ -6,7 +6,7 @@ sidebar_label: "Настройки пользователей"
 
 # User settings {#nastroiki-polzovatelei}
 
-The `users` section of the `user.xml` configuration file contains the settings for users.
+The `users` section of the `users.xml` configuration file contains the settings for users.
 
 :::note Information
 For managing users, it is recommended to use the [SQL-driven workflow](../access-rights.md#access-control), which is also supported in ClickHouse.
@@ -29,7 +29,7 @@ FROM table2
 Query:
 
 ``` sql
-SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,6);
+SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,8);
 ```
 
 Result:
@@ -234,11 +234,14 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
 ### Syntax {#show-grants-syntax}
 
 ``` sql
-SHOW GRANTS [FOR user]
+SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL]
 ```
 
 If no user is specified, the query returns the privileges of the current user.
 
+`WITH IMPLICIT` adds the implicit privileges (for example, `GRANT SELECT ON system.one`).
+
+`FINAL` merges all current privileges with the privileges of all of the user's roles (with inheritance).
+
 ## SHOW CREATE USER {#show-create-user-statement}
@@ -8,7 +8,7 @@ sidebar_label: "\u7528\u6237\u8BBE\u7F6E"
 
 # User settings {#user-settings}
 
-The `users` section of `user.xml` contains the user configuration.
+The `users` section of `users.xml` contains the user configuration.
 
 :::note
 ClickHouse also supports a [SQL-driven workflow](/docs/en/operations/access-rights#access-control) for managing users. We recommend using it.
@@ -220,7 +220,7 @@ SELECT bitTest(43, 2);
 
 ## bitTestAll {#bittestall}
 
-Returns the result of the [logical conjuction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND) of all bits at the given positions. Bits are counted from right to left, starting at 0.
+Returns the result of the [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND) of all bits at the given positions. Bits are counted from right to left, starting at 0.
 
 The result of the AND operation:
@@ -110,7 +110,7 @@ SHOW DICTIONARIES FROM db LIKE '%reg%' LIMIT 2
 ### Syntax {#show-grants-syntax}
 
 ``` sql
-SHOW GRANTS [FOR user]
+SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL]
 ```
 
 If no user is specified, the privileges of the current user are printed.
@ -445,8 +445,7 @@ private:
|
|||||||
shutdown = true;
|
shutdown = true;
|
||||||
throw;
|
throw;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
std::cerr << getCurrentExceptionMessage(print_stacktrace,
|
std::cerr << getCurrentExceptionMessage(print_stacktrace,
|
||||||
true /*check embedded stack trace*/) << std::endl;
|
true /*check embedded stack trace*/) << std::endl;
|
||||||
|
|
||||||
@ -454,7 +453,6 @@ private:
|
|||||||
++comparison_info_per_interval[info_index]->errors;
|
++comparison_info_per_interval[info_index]->errors;
|
||||||
++comparison_info_total[info_index]->errors;
|
++comparison_info_total[info_index]->errors;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
// Count failed queries toward executed, so that we'd reach
|
// Count failed queries toward executed, so that we'd reach
|
||||||
// max_iterations even if every run fails.
|
// max_iterations even if every run fails.
|
||||||
++queries_executed;
|
++queries_executed;
|
||||||
|
@ -346,7 +346,9 @@ try
|
|||||||
|
|
||||||
processConfig();
|
processConfig();
|
||||||
adjustSettings();
|
adjustSettings();
|
||||||
initTTYBuffer(toProgressOption(config().getString("progress", "default")));
|
initTTYBuffer(toProgressOption(config().getString("progress", "default")),
|
||||||
|
toProgressOption(config().getString("progress-table", "default")));
|
||||||
|
initKeystrokeInterceptor();
|
||||||
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
|
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
|
||||||
|
|
||||||
{
|
{
|
||||||
@ -477,14 +479,11 @@ void Client::connect()
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
catch (const Exception & e)
|
catch (const Exception & e)
|
||||||
{
|
|
||||||
if (e.code() == DB::ErrorCodes::AUTHENTICATION_FAILED)
|
|
||||||
{
|
{
|
||||||
/// This problem can't be fixed with reconnection so it is not attempted
|
/// This problem can't be fixed with reconnection so it is not attempted
|
||||||
|
if (e.code() == DB::ErrorCodes::AUTHENTICATION_FAILED)
|
||||||
throw;
|
throw;
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
if (attempted_address_index == hosts_and_ports.size() - 1)
|
if (attempted_address_index == hosts_and_ports.size() - 1)
|
||||||
throw;
|
throw;
|
||||||
|
|
||||||
@ -501,7 +500,6 @@ void Client::connect()
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." + toString(server_version_patch);
|
server_version = toString(server_version_major) + "." + toString(server_version_minor) + "." + toString(server_version_patch);
|
||||||
load_suggestions = is_interactive && (server_revision >= Suggest::MIN_SERVER_REVISION) && !config().getBool("disable_suggestion", false);
|
load_suggestions = is_interactive && (server_revision >= Suggest::MIN_SERVER_REVISION) && !config().getBool("disable_suggestion", false);
|
||||||
@ -772,7 +770,7 @@ bool Client::processWithFuzzing(const String & full_query)
|
|||||||
else
|
else
|
||||||
this_query_runs = 1;
|
this_query_runs = 1;
|
||||||
}
|
}
|
||||||
else if (const auto * insert = orig_ast->as<ASTInsertQuery>())
|
else if (const auto * /*insert*/ _ = orig_ast->as<ASTInsertQuery>())
|
||||||
{
|
{
|
||||||
this_query_runs = 1;
|
this_query_runs = 1;
|
||||||
queries_for_fuzzed_tables = fuzzer.getInsertQueriesForFuzzedTables(full_query);
|
queries_for_fuzzed_tables = fuzzer.getInsertQueriesForFuzzedTables(full_query);
|
||||||
|
@ -46,7 +46,7 @@ public:
|
|||||||
path_from,
|
path_from,
|
||||||
disk_from.getDisk()->getName());
|
disk_from.getDisk()->getName());
|
||||||
}
|
}
|
||||||
else if (disk_from.getDisk()->isFile(path_from))
|
if (disk_from.getDisk()->isFile(path_from))
|
||||||
{
|
{
|
||||||
auto target_location = getTargetLocation(path_from, disk_to, path_to);
|
auto target_location = getTargetLocation(path_from, disk_to, path_to);
|
||||||
if (!disk_to.getDisk()->exists(target_location) || disk_to.getDisk()->isFile(target_location))
|
if (!disk_to.getDisk()->exists(target_location) || disk_to.getDisk()->isFile(target_location))
|
||||||
@ -77,7 +77,7 @@ public:
|
|||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot overwrite non-directory {} with directory {}", path_to, target_location);
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot overwrite non-directory {} with directory {}", path_to, target_location);
|
||||||
}
|
}
|
||||||
else if (!disk_to.getDisk()->exists(target_location))
|
if (!disk_to.getDisk()->exists(target_location))
|
||||||
{
|
{
|
||||||
disk_to.getDisk()->createDirectory(target_location);
|
disk_to.getDisk()->createDirectory(target_location);
|
||||||
}
|
}
|
||||||
|
@ -72,13 +72,9 @@ private:
|
|||||||
auto path = [&]() -> String
|
auto path = [&]() -> String
|
||||||
{
|
{
|
||||||
if (relative_path.ends_with("/"))
|
if (relative_path.ends_with("/"))
|
||||||
{
|
|
||||||
return relative_path + file_name;
|
return relative_path + file_name;
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
return relative_path + "/" + file_name;
|
return relative_path + "/" + file_name;
|
||||||
}
|
|
||||||
}();
|
}();
|
||||||
if (disk.isDirectory(path))
|
if (disk.isDirectory(path))
|
||||||
{
|
{
|
||||||
|
@ -53,12 +53,10 @@ public:
|
|||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot move '{}' to '{}': Directory not empty", path_from, target_location);
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot move '{}' to '{}': Directory not empty", path_from, target_location);
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
disk.getDisk()->moveDirectory(path_from, target_location);
|
disk.getDisk()->moveDirectory(path_from, target_location);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
else if (!disk.getDisk()->exists(path_from))
|
else if (!disk.getDisk()->exists(path_from))
|
||||||
{
|
{
|
||||||
throw Exception(
|
throw Exception(
|
||||||
|
@ -32,17 +32,15 @@ public:
|
|||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} on disk {} doesn't exist", path, disk.getDisk()->getName());
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Path {} on disk {} doesn't exist", path, disk.getDisk()->getName());
|
||||||
}
|
}
|
||||||
else if (disk.getDisk()->isDirectory(path))
|
if (disk.getDisk()->isDirectory(path))
|
||||||
{
|
{
|
||||||
if (!recursive)
|
if (!recursive)
|
||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot remove '{}': Is a directory", path);
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "cannot remove '{}': Is a directory", path);
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
disk.getDisk()->removeRecursive(path);
|
disk.getDisk()->removeRecursive(path);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
disk.getDisk()->removeFileIfExists(path);
|
disk.getDisk()->removeFileIfExists(path);
|
||||||
|
@ -33,14 +33,10 @@ public:
|
|||||||
auto in = [&]() -> std::unique_ptr<ReadBufferFromFileBase>
|
auto in = [&]() -> std::unique_ptr<ReadBufferFromFileBase>
|
||||||
{
|
{
|
||||||
if (!path_from.has_value())
|
if (!path_from.has_value())
|
||||||
{
|
|
||||||
return std::make_unique<ReadBufferFromFileDescriptor>(STDIN_FILENO);
|
return std::make_unique<ReadBufferFromFileDescriptor>(STDIN_FILENO);
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
String relative_path_from = disk.getRelativeFromRoot(path_from.value());
|
String relative_path_from = disk.getRelativeFromRoot(path_from.value());
|
||||||
return disk.getDisk()->readFile(relative_path_from, getReadSettings());
|
return disk.getDisk()->readFile(relative_path_from, getReadSettings());
|
||||||
}
|
|
||||||
}();
|
}();
|
||||||
|
|
||||||
auto out = disk.getDisk()->writeFile(path_to);
|
auto out = disk.getDisk()->writeFile(path_to);
|
||||||
|
@ -127,13 +127,12 @@ std::vector<String> DisksApp::getCompletions(const String & prefix) const
|
|||||||
}
|
}
|
||||||
return getEmptyCompletion(command->command_name);
|
return getEmptyCompletion(command->command_name);
|
||||||
}
|
}
|
||||||
else if (arguments.size() == 1)
|
if (arguments.size() == 1)
|
||||||
{
|
{
|
||||||
String command_prefix = arguments[0];
|
String command_prefix = arguments[0];
|
||||||
return getCommandsToComplete(command_prefix);
|
return getCommandsToComplete(command_prefix);
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
String last_token = arguments.back();
|
String last_token = arguments.back();
|
||||||
CommandPtr command;
|
CommandPtr command;
|
||||||
try
|
try
|
||||||
@ -144,23 +143,17 @@ std::vector<String> DisksApp::getCompletions(const String & prefix) const
|
|||||||
{
|
{
|
||||||
return {last_token};
|
return {last_token};
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<String> answer = {};
|
std::vector<String> answer = {};
|
||||||
if (command->command_name == "help")
|
if (command->command_name == "help")
|
||||||
{
|
|
||||||
return getCommandsToComplete(last_token);
|
return getCommandsToComplete(last_token);
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
answer = [&]() -> std::vector<String>
|
answer = [&]() -> std::vector<String>
|
||||||
{
|
{
|
||||||
if (multidisk_commands.contains(command->command_name))
|
if (multidisk_commands.contains(command->command_name))
|
||||||
{
|
|
||||||
return client->getAllFilesByPatternFromAllDisks(last_token);
|
return client->getAllFilesByPatternFromAllDisks(last_token);
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
return client->getCurrentDiskWithPath().getAllFilesByPattern(last_token);
|
return client->getCurrentDiskWithPath().getAllFilesByPattern(last_token);
|
||||||
}
|
|
||||||
}();
|
}();
|
||||||
|
|
||||||
for (const auto & disk_name : client->getAllDiskNames())
|
for (const auto & disk_name : client->getAllDiskNames())
|
||||||
@ -178,18 +171,15 @@ std::vector<String> DisksApp::getCompletions(const String & prefix) const
|
|||||||
answer.push_back(option_sign);
|
answer.push_back(option_sign);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if (!answer.empty())
|
if (!answer.empty())
|
||||||
{
|
{
|
||||||
std::sort(answer.begin(), answer.end());
|
std::sort(answer.begin(), answer.end());
|
||||||
return answer;
|
return answer;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
return {last_token};
|
return {last_token};
|
||||||
}
|
}
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
bool DisksApp::processQueryText(const String & text)
|
bool DisksApp::processQueryText(const String & text)
|
||||||
{
|
{
|
||||||
@ -210,11 +200,11 @@ bool DisksApp::processQueryText(const String & text)
|
|||||||
catch (DB::Exception & err)
|
catch (DB::Exception & err)
|
||||||
{
|
{
|
||||||
int code = getCurrentExceptionCode();
|
int code = getCurrentExceptionCode();
|
||||||
|
|
||||||
if (code == ErrorCodes::LOGICAL_ERROR)
|
if (code == ErrorCodes::LOGICAL_ERROR)
|
||||||
{
|
|
||||||
throw std::move(err);
|
throw std::move(err);
|
||||||
}
|
|
||||||
else if (code == ErrorCodes::BAD_ARGUMENTS)
|
if (code == ErrorCodes::BAD_ARGUMENTS)
|
||||||
{
|
{
|
||||||
std::cerr << err.message() << "\n"
|
std::cerr << err.message() << "\n"
|
||||||
<< "\n";
|
<< "\n";
|
||||||
|
@ -49,11 +49,9 @@ std::vector<String> DiskWithPath::listAllFilesByPath(const String & any_path) co
|
|||||||
disk->listFiles(getRelativeFromRoot(any_path), file_names);
|
disk->listFiles(getRelativeFromRoot(any_path), file_names);
|
||||||
return file_names;
|
return file_names;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
return {};
|
return {};
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
std::vector<String> DiskWithPath::getAllFilesByPattern(const String & pattern) const
|
std::vector<String> DiskWithPath::getAllFilesByPattern(const String & pattern) const
|
||||||
{
|
{
|
||||||
@ -61,23 +59,15 @@ std::vector<String> DiskWithPath::getAllFilesByPattern(const String & pattern) c
|
|||||||
{
|
{
|
||||||
auto slash_pos = pattern.find_last_of('/');
|
auto slash_pos = pattern.find_last_of('/');
|
||||||
if (slash_pos >= pattern.size())
|
if (slash_pos >= pattern.size())
|
||||||
{
|
|
||||||
return {"", pattern};
|
return {"", pattern};
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
return {pattern.substr(0, slash_pos + 1), pattern.substr(slash_pos + 1, pattern.size() - slash_pos - 1)};
|
return {pattern.substr(0, slash_pos + 1), pattern.substr(slash_pos + 1, pattern.size() - slash_pos - 1)};
|
||||||
}
|
|
||||||
}();
|
}();
|
||||||
|
|
||||||
if (!isDirectory(path_before))
|
if (!isDirectory(path_before))
|
||||||
{
|
|
||||||
return {};
|
return {};
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
std::vector<String> file_names = listAllFilesByPath(path_before);
|
|
||||||
|
|
||||||
|
std::vector<String> file_names = listAllFilesByPath(path_before);
|
||||||
std::vector<String> answer;
|
std::vector<String> answer;
|
||||||
|
|
||||||
for (const auto & file_name : file_names)
|
for (const auto & file_name : file_names)
|
||||||
@ -93,7 +83,6 @@ std::vector<String> DiskWithPath::getAllFilesByPattern(const String & pattern) c
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
return answer;
|
return answer;
|
||||||
}
|
|
||||||
};
|
};
|
||||||
|
|
||||||
void DiskWithPath::setPath(const String & any_path)
|
void DiskWithPath::setPath(const String & any_path)
|
||||||
|
@ -39,13 +39,9 @@ DiskWithPath & ICommand::getDiskWithPath(DisksClient & client, const CommandLine
|
|||||||
{
|
{
|
||||||
auto disk_name = getValueFromCommandLineOptionsWithOptional<String>(options, name);
|
auto disk_name = getValueFromCommandLineOptionsWithOptional<String>(options, name);
|
||||||
if (disk_name.has_value())
|
if (disk_name.has_value())
|
||||||
{
|
|
||||||
return client.getDiskWithPath(disk_name.value());
|
return client.getDiskWithPath(disk_name.value());
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
return client.getCurrentDiskWithPath();
|
return client.getCurrentDiskWithPath();
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -63,40 +63,28 @@ protected:
|
|||||||
static T getValueFromCommandLineOptionsThrow(const CommandLineOptions & options, const String & name)
|
static T getValueFromCommandLineOptionsThrow(const CommandLineOptions & options, const String & name)
|
||||||
{
|
{
|
||||||
if (options.count(name))
|
if (options.count(name))
|
||||||
{
|
|
||||||
return getValueFromCommandLineOptions<T>(options, name);
|
return getValueFromCommandLineOptions<T>(options, name);
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Mandatory argument '{}' is missing", name);
|
throw DB::Exception(ErrorCodes::BAD_ARGUMENTS, "Mandatory argument '{}' is missing", name);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
static T getValueFromCommandLineOptionsWithDefault(const CommandLineOptions & options, const String & name, const T & default_value)
|
static T getValueFromCommandLineOptionsWithDefault(const CommandLineOptions & options, const String & name, const T & default_value)
|
||||||
{
|
{
|
||||||
if (options.count(name))
|
if (options.count(name))
|
||||||
{
|
|
||||||
return getValueFromCommandLineOptions<T>(options, name);
|
return getValueFromCommandLineOptions<T>(options, name);
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
return default_value;
|
return default_value;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
template <typename T>
|
template <typename T>
|
||||||
static std::optional<T> getValueFromCommandLineOptionsWithOptional(const CommandLineOptions & options, const String & name)
|
static std::optional<T> getValueFromCommandLineOptionsWithOptional(const CommandLineOptions & options, const String & name)
|
||||||
{
|
{
|
||||||
if (options.count(name))
|
if (options.count(name))
|
||||||
{
|
|
||||||
return std::optional{getValueFromCommandLineOptions<T>(options, name)};
|
return std::optional{getValueFromCommandLineOptions<T>(options, name)};
|
||||||
}
|
|
||||||
else
|
|
||||||
{
|
|
||||||
return std::nullopt;
|
return std::nullopt;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
DiskWithPath & getDiskWithPath(DisksClient & client, const CommandLineOptions & options, const String & name);
|
DiskWithPath & getDiskWithPath(DisksClient & client, const CommandLineOptions & options, const String & name);
|
||||||
|
|
||||||
|
@ -110,7 +110,7 @@ static auto executeScript(const std::string & command, bool throw_on_error = fal
|
|||||||
sh->wait();
|
sh->wait();
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
return sh->tryWait();
|
return sh->tryWait();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -26,8 +26,7 @@ CatBoostLibraryHandlerPtr CatBoostLibraryHandlerFactory::tryGetModel(const Strin
|
|||||||
|
|
||||||
if (found)
|
if (found)
|
||||||
return handler->second;
|
return handler->second;
|
||||||
else
|
|
||||||
{
|
|
||||||
if (create_if_not_found)
|
if (create_if_not_found)
|
||||||
{
|
{
|
||||||
auto new_handler = std::make_shared<CatBoostLibraryHandler>(library_path, model_path);
|
auto new_handler = std::make_shared<CatBoostLibraryHandler>(library_path, model_path);
|
||||||
@ -37,7 +36,6 @@ CatBoostLibraryHandlerPtr CatBoostLibraryHandlerFactory::tryGetModel(const Strin
|
|||||||
}
|
}
|
||||||
return nullptr;
|
return nullptr;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
void CatBoostLibraryHandlerFactory::removeModel(const String & model_path)
|
void CatBoostLibraryHandlerFactory::removeModel(const String & model_path)
|
||||||
{
|
{
|
||||||
|
@ -25,7 +25,7 @@ std::unique_ptr<HTTPRequestHandler> LibraryBridgeHandlerFactory::createRequestHa
|
|||||||
{
|
{
|
||||||
if (uri.getPath() == "/extdict_ping")
|
if (uri.getPath() == "/extdict_ping")
|
||||||
return std::make_unique<ExternalDictionaryLibraryBridgeExistsHandler>(getContext());
|
return std::make_unique<ExternalDictionaryLibraryBridgeExistsHandler>(getContext());
|
||||||
else if (uri.getPath() == "/catboost_ping")
|
if (uri.getPath() == "/catboost_ping")
|
||||||
return std::make_unique<CatBoostLibraryBridgeExistsHandler>(getContext());
|
return std::make_unique<CatBoostLibraryBridgeExistsHandler>(getContext());
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -33,7 +33,7 @@ std::unique_ptr<HTTPRequestHandler> LibraryBridgeHandlerFactory::createRequestHa
|
|||||||
{
|
{
|
||||||
if (uri.getPath() == "/extdict_request")
|
if (uri.getPath() == "/extdict_request")
|
||||||
return std::make_unique<ExternalDictionaryLibraryBridgeRequestHandler>(getContext());
|
return std::make_unique<ExternalDictionaryLibraryBridgeRequestHandler>(getContext());
|
||||||
else if (uri.getPath() == "/catboost_request")
|
if (uri.getPath() == "/catboost_request")
|
||||||
return std::make_unique<CatBoostLibraryBridgeRequestHandler>(getContext());
|
return std::make_unique<CatBoostLibraryBridgeRequestHandler>(getContext());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -158,12 +158,10 @@ void ExternalDictionaryLibraryBridgeRequestHandler::handleRequest(HTTPServerRequ
|
|||||||
out.finalize();
|
out.finalize();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call extDict_libNew instead", from_dictionary_id);
|
LOG_TRACE(log, "Cannot clone from dictionary with id: {}, will call extDict_libNew instead", from_dictionary_id);
|
||||||
lib_new = true;
|
lib_new = true;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if (lib_new)
|
if (lib_new)
|
||||||
{
|
{
|
||||||
auto & read_buf = request.getStream();
|
auto & read_buf = request.getStream();
|
||||||
|
@ -518,7 +518,9 @@ try
|
|||||||
|
|
||||||
SCOPE_EXIT({ cleanup(); });
|
SCOPE_EXIT({ cleanup(); });
|
||||||
|
|
||||||
initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default")));
|
initTTYBuffer(toProgressOption(getClientConfiguration().getString("progress", "default")),
|
||||||
|
toProgressOption(config().getString("progress-table", "default")));
|
||||||
|
initKeystrokeInterceptor();
|
||||||
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
|
ASTAlterCommand::setFormatAlterCommandsWithParentheses(true);
|
||||||
|
|
||||||
/// try to load user defined executable functions, throw on error and die
|
/// try to load user defined executable functions, throw on error and die
|
||||||
|
@ -231,7 +231,7 @@ static Int64 transformSigned(Int64 x, UInt64 seed)
|
|||||||
{
|
{
|
||||||
if (x >= 0)
|
if (x >= 0)
|
||||||
return transform(x, seed);
|
return transform(x, seed);
|
||||||
else
|
|
||||||
return -transform(-x, seed); /// It works Ok even for minimum signed number.
|
return -transform(-x, seed); /// It works Ok even for minimum signed number.
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1105,7 +1105,7 @@ public:
|
|||||||
{
|
{
|
||||||
if (isUInt(data_type))
|
if (isUInt(data_type))
|
||||||
return std::make_unique<UnsignedIntegerModel>(seed);
|
return std::make_unique<UnsignedIntegerModel>(seed);
|
||||||
else
|
|
||||||
return std::make_unique<SignedIntegerModel>(seed);
|
return std::make_unique<SignedIntegerModel>(seed);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -39,11 +39,11 @@ IdentifierQuotingStyle getQuotingStyle(nanodbc::ConnectionHolderPtr connection)
|
|||||||
auto identifier_quote = getIdentifierQuote(connection);
|
auto identifier_quote = getIdentifierQuote(connection);
|
||||||
if (identifier_quote.empty())
|
if (identifier_quote.empty())
|
||||||
return IdentifierQuotingStyle::Backticks;
|
return IdentifierQuotingStyle::Backticks;
|
||||||
else if (identifier_quote[0] == '`')
|
if (identifier_quote[0] == '`')
|
||||||
return IdentifierQuotingStyle::Backticks;
|
return IdentifierQuotingStyle::Backticks;
|
||||||
else if (identifier_quote[0] == '"')
|
if (identifier_quote[0] == '"')
|
||||||
return IdentifierQuotingStyle::DoubleQuotes;
|
return IdentifierQuotingStyle::DoubleQuotes;
|
||||||
else
|
|
||||||
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT,
|
||||||
"Can not map quote identifier '{}' to IdentifierQuotingStyle value", identifier_quote);
|
"Can not map quote identifier '{}' to IdentifierQuotingStyle value", identifier_quote);
|
||||||
}
|
}
|
||||||
|
@ -127,7 +127,7 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
|||||||
|
|
||||||
if (*pos == '{')
|
if (*pos == '{')
|
||||||
return read_escaped_value();
|
return read_escaped_value();
|
||||||
else
|
|
||||||
return read_plain_value();
|
return read_plain_value();
|
||||||
};
|
};
|
||||||
|
|
||||||
@ -206,13 +206,11 @@ std::string validateODBCConnectionString(const std::string & connection_string)
|
|||||||
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
|
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
|
reconstructed_connection_string.append(value_pos, next_pos - value_pos);
|
||||||
reconstructed_connection_string.append("}}");
|
reconstructed_connection_string.append("}}");
|
||||||
value_pos = next_pos + 1;
|
value_pos = next_pos + 1;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
reconstructed_connection_string += '}';
|
reconstructed_connection_string += '}';
|
||||||
};
|
};
|
||||||
|
@ -158,6 +158,11 @@ namespace Setting
|
|||||||
extern const SettingsSeconds send_timeout;
|
extern const SettingsSeconds send_timeout;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
namespace MergeTreeSetting
|
||||||
|
{
|
||||||
|
extern const MergeTreeSettingsBool allow_remote_fs_zero_copy_replication;
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
namespace CurrentMetrics
|
namespace CurrentMetrics
|
||||||
@ -599,7 +604,7 @@ void sanityChecks(Server & server)
|
|||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
if (server.context()->getMergeTreeSettings().allow_remote_fs_zero_copy_replication)
|
if (server.context()->getMergeTreeSettings()[MergeTreeSetting::allow_remote_fs_zero_copy_replication])
|
||||||
{
|
{
|
||||||
server.context()->addWarningMessage("The setting 'allow_remote_fs_zero_copy_replication' is enabled for MergeTree tables."
|
server.context()->addWarningMessage("The setting 'allow_remote_fs_zero_copy_replication' is enabled for MergeTree tables."
|
||||||
" But the feature of 'zero-copy replication' is under development and is not ready for production."
|
" But the feature of 'zero-copy replication' is under development and is not ready for production."
|
||||||
@@ -628,7 +633,9 @@ void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, Contex
             auto condition_write_buffer = WriteBufferFromOwnString();

             LOG_DEBUG(log, "Checking startup query condition `{}`", condition);
-            executeQuery(condition_read_buffer, condition_write_buffer, true, context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
+            auto startup_context = Context::createCopy(context);
+            startup_context->makeQueryContext();
+            executeQuery(condition_read_buffer, condition_write_buffer, true, startup_context, callback, QueryFlags{ .internal = true }, std::nullopt, {});

             auto result = condition_write_buffer.str();

@@ -648,7 +655,9 @@ void loadStartupScripts(const Poco::Util::AbstractConfiguration & config, Contex
             auto write_buffer = WriteBufferFromOwnString();

             LOG_DEBUG(log, "Executing query `{}`", query);
-            executeQuery(read_buffer, write_buffer, true, context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
+            auto startup_context = Context::createCopy(context);
+            startup_context->makeQueryContext();
+            executeQuery(read_buffer, write_buffer, true, startup_context, callback, QueryFlags{ .internal = true }, std::nullopt, {});
         }
     }
     catch (...)
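Illustrative sketch (not part of the commit): the two hunks above stop running startup scripts directly in the shared server context; each query now gets its own copy, promoted to a query context, so per-query state cannot leak from one script into the next. A schematic of the pattern with simplified types; Ctx and runStartupScripts are hypothetical stand-ins, not the real ClickHouse Context API:

#include <memory>
#include <string>
#include <vector>

struct Ctx
{
    bool is_query_context = false;

    static std::shared_ptr<Ctx> createCopy(const std::shared_ptr<Ctx> & base)
    {
        return std::make_shared<Ctx>(*base);  // independent copy, server state untouched
    }

    void makeQueryContext() { is_query_context = true; }
};

void runStartupScripts(const std::shared_ptr<Ctx> & server_ctx, const std::vector<std::string> & queries)
{
    for (const auto & query : queries)
    {
        auto query_ctx = Ctx::createCopy(server_ctx);  // isolate this query
        query_ctx->makeQueryContext();
        // executeQuery(query, query_ctx);  // hypothetical call
        (void)query;
    }
}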
@@ -1125,9 +1134,6 @@ try
         /// We need to reload server settings because config could be updated via zookeeper.
         server_settings.loadSettingsFromConfig(config());

-        /// NOTE: Do sanity checks after we loaded all possible substitutions (for the configuration) from ZK
-        sanityChecks(*this);
-
 #if defined(OS_LINUX)
         std::string executable_path = getExecutablePath();

@@ -2019,6 +2025,11 @@ try
     if (!filesystem_caches_path.empty())
         global_context->setFilesystemCachesPath(filesystem_caches_path);

+    /// NOTE: Do sanity checks after we loaded all possible substitutions (for the configuration) from ZK
+    /// Additionally, making the check after the default profile is initialized.
+    /// It is important to initialize MergeTreeSettings after Settings, to support compatibility for MergeTreeSettings.
+    sanityChecks(*this);
+
     /// Check sanity of MergeTreeSettings on server startup
     {
         size_t background_pool_tasks = global_context->getMergeMutateExecutor()->getMaxTasksCount();

@@ -116,16 +116,14 @@ namespace
                 reading_dependents = false;
                 continue;
             }
-            else if (line == "DEPENDENTS")
+            if (line == "DEPENDENTS")
             {
                 reading_dependents = true;
                 reading_dependencies = false;
                 continue;
             }
-            else if (line.empty())
-            {
+            if (line.empty())
                 continue;
-            }

             size_t separator1 = line.find('\t');
             size_t separator2 = line.find('\t', separator1 + 1);

@@ -22,8 +22,10 @@
 #include <Backups/RestorerFromBackup.h>
 #include <Core/Settings.h>
 #include <base/defines.h>
+#include <base/range.h>
 #include <IO/Operators.h>
 #include <Common/re2.h>
+
 #include <Poco/AccessExpireCache.h>
 #include <boost/algorithm/string/join.hpp>
 #include <filesystem>

@@ -132,8 +134,8 @@ public:
                 "' registered for user-defined settings",
                 String{setting_name}, boost::algorithm::join(registered_prefixes, "' or '"));
         }
-        else
-            BaseSettingsHelpers::throwSettingNotFound(setting_name);
+
+        throw Exception(ErrorCodes::UNKNOWN_SETTING, "Unknown setting '{}'", String{setting_name});
     }

 private:

@@ -828,7 +830,6 @@ std::shared_ptr<const EnabledQuota> AccessControl::getAuthenticationQuota(
             quota_key,
             throw_if_client_key_empty);
     }
-    else
-        return nullptr;
+    return nullptr;
 }

@@ -1155,9 +1155,6 @@ private:

         calculateMinMaxFlags();

-        if (!isLeaf())
-            return;
-
         auto new_flags = function(flags, min_flags_with_children, max_flags_with_children, level, grant_option);

         if (new_flags != flags)

@@ -1483,16 +1480,16 @@ bool AccessRights::isGrantedImplHelper(const AccessRightsElement & element) cons
     {
         if (element.anyParameter())
             return isGrantedImpl<grant_option, wildcard>(element.access_flags);
-        else
-            return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.parameter);
+        return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.parameter);
     }
-    else if (element.anyDatabase())
+    if (element.anyDatabase())
         return isGrantedImpl<grant_option, wildcard>(element.access_flags);
-    else if (element.anyTable())
+    if (element.anyTable())
         return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database);
-    else if (element.anyColumn())
+    if (element.anyColumn())
         return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database, element.table);
-    else
-        return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database, element.table, element.columns);
+    return isGrantedImpl<grant_option, wildcard>(element.access_flags, element.database, element.table, element.columns);
 }

@@ -1503,17 +1500,15 @@ bool AccessRights::isGrantedImpl(const AccessRightsElement & element) const
     {
         if (element.grant_option)
             return isGrantedImplHelper<true, true>(element);
-        else
-            return isGrantedImplHelper<grant_option, true>(element);
+        return isGrantedImplHelper<grant_option, true>(element);
     }
-    else
-    {
-        if (element.grant_option)
-            return isGrantedImplHelper<true, wildcard>(element);
-        else
-            return isGrantedImplHelper<grant_option, wildcard>(element);
-    }
+
+    if (element.grant_option)
+        return isGrantedImplHelper<true, wildcard>(element);
+    return isGrantedImplHelper<grant_option, wildcard>(element);
 }

 template <bool grant_option, bool wildcard>
 bool AccessRights::isGrantedImpl(const AccessRightsElements & elements) const
@@ -501,10 +501,9 @@ AuthenticationData AuthenticationData::fromAST(const ASTAuthenticationData & que
             auth_data.setPasswordHashBinary(AuthenticationData::Util::stringToDigest(value));
             return auth_data;
         }
-        else
-        {
-            auth_data.setPasswordHashHex(value);
-        }
+
+        auth_data.setPasswordHashHex(value);

         if (query.type == AuthenticationType::SHA256_PASSWORD && args_size == 2)
         {

@@ -247,7 +247,6 @@ namespace
             const auto & unused_node = *(owned_nodes.begin()->second);
             if (unused_node.node_type == UNKNOWN)
                 throw Exception(ErrorCodes::LOGICAL_ERROR, "Parent group '{}' not found", unused_node.keyword);
-            else
-                throw Exception(ErrorCodes::LOGICAL_ERROR, "Access type '{}' should have parent group", unused_node.keyword);
+            throw Exception(ErrorCodes::LOGICAL_ERROR, "Access type '{}' should have parent group", unused_node.keyword);
         }
     }

@@ -61,7 +61,7 @@ namespace
     {
         if (addr.family() == IPAddress::Family::IPv4 && addr_v6 == toIPv6(addr))
             return true;
-        else if (addr.family() == IPAddress::Family::IPv6 && addr_v6 == addr)
+        if (addr.family() == IPAddress::Family::IPv6 && addr_v6 == addr)
             return true;
     }

@@ -267,9 +267,8 @@ String AllowedClientHosts::IPSubnet::toString() const
     unsigned int prefix_length = mask.prefixLength();
     if (isMaskAllBitsOne())
         return prefix.toString();
-    else if (IPAddress{prefix_length, mask.family()} == mask)
+    if (IPAddress{prefix_length, mask.family()} == mask)
         return fs::path(prefix.toString()) / std::to_string(prefix_length);
-    else
-        return fs::path(prefix.toString()) / mask.toString();
+    return fs::path(prefix.toString()) / mask.toString();
 }

@@ -575,11 +574,10 @@ bool AllowedClientHosts::contains(const IPAddress & client_address) const
         parseLikePattern(pattern, subnet, name, name_regexp);
         if (subnet)
             return check_subnet(*subnet);
-        else if (name)
+        if (name)
             return check_name(*name);
-        else if (name_regexp)
+        if (name_regexp)
             return check_name_regexp(*name_regexp);
-        else
-            return false;
+        return false;
     };

@@ -28,7 +28,6 @@ String QuotaTypeInfo::valueToString(QuotaValue value) const
 {
     if (!(value % output_denominator))
         return std::to_string(value / output_denominator);
-    else
-        return toString(static_cast<double>(value) / output_denominator);
+    return toString(static_cast<double>(value) / output_denominator);
 }

@@ -36,7 +35,6 @@ QuotaValue QuotaTypeInfo::stringToValue(const String & str) const
 {
     if (output_denominator == 1)
         return static_cast<QuotaValue>(parse<UInt64>(str));
-    else
-        return static_cast<QuotaValue>(parse<Float64>(str) * output_denominator);
+    return static_cast<QuotaValue>(parse<Float64>(str) * output_denominator);
 }

@@ -64,7 +64,27 @@ namespace
 }


-AccessRights addImplicitAccessRights(const AccessRights & access, const AccessControl & access_control)
+std::array<UUID, 1> to_array(const UUID & id)
+{
+    std::array<UUID, 1> ids;
+    ids[0] = id;
+    return ids;
+}
+
+/// Helper for using in templates.
+std::string_view getDatabase() { return {}; }
+
+template <typename... OtherArgs>
+std::string_view getDatabase(std::string_view arg1, const OtherArgs &...) { return arg1; }
+
+std::string_view getTableEngine() { return {}; }
+
+template <typename... OtherArgs>
+std::string_view getTableEngine(std::string_view arg1, const OtherArgs &...) { return arg1; }
+}
+
+
+AccessRights ContextAccess::addImplicitAccessRights(const AccessRights & access, const AccessControl & access_control)
 {
     AccessFlags max_flags;

@@ -254,26 +274,6 @@ namespace
 }

-
-std::array<UUID, 1> to_array(const UUID & id)
-{
-    std::array<UUID, 1> ids;
-    ids[0] = id;
-    return ids;
-}
-
-/// Helper for using in templates.
-std::string_view getDatabase() { return {}; }
-
-template <typename... OtherArgs>
-std::string_view getDatabase(std::string_view arg1, const OtherArgs &...) { return arg1; }
-
-std::string_view getTableEngine() { return {}; }
-
-template <typename... OtherArgs>
-std::string_view getTableEngine(std::string_view arg1, const OtherArgs &...) { return arg1; }
-}
-
-
 std::shared_ptr<const ContextAccess> ContextAccess::fromContext(const ContextPtr & context)
 {
     return ContextAccessWrapper::fromContext(context)->getAccess();

@@ -728,7 +728,6 @@ bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, AccessFlag
                 "For queries over HTTP, method GET implies readonly. "
                 "You should use method POST for modifying queries");
         }
-        else
-            return access_denied(ErrorCodes::READONLY, "{}: Cannot execute query in readonly mode");
+        return access_denied(ErrorCodes::READONLY, "{}: Cannot execute query in readonly mode");
     }
 }

@@ -770,16 +769,16 @@ bool ContextAccess::checkAccessImplHelper(const ContextPtr & context, const Acce
     {
         if (element.anyParameter())
             return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags);
-        else
-            return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.parameter);
+        return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.parameter);
     }
-    else if (element.anyDatabase())
+    if (element.anyDatabase())
         return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags);
-    else if (element.anyTable())
+    if (element.anyTable())
         return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.database);
-    else if (element.anyColumn())
+    if (element.anyColumn())
         return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.database, element.table);
-    else
-        return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.database, element.table, element.columns);
+    return checkAccessImpl<throw_if_denied, grant_option, wildcard>(context, element.access_flags, element.database, element.table, element.columns);
 }

@@ -790,17 +789,15 @@ bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessRigh
     {
         if (element.grant_option)
             return checkAccessImplHelper<throw_if_denied, true, true>(context, element);
-        else
-            return checkAccessImplHelper<throw_if_denied, grant_option, true>(context, element);
+        return checkAccessImplHelper<throw_if_denied, grant_option, true>(context, element);
     }
-    else
-    {
-        if (element.grant_option)
-            return checkAccessImplHelper<throw_if_denied, true, wildcard>(context, element);
-        else
-            return checkAccessImplHelper<throw_if_denied, grant_option, wildcard>(context, element);
-    }
+
+    if (element.grant_option)
+        return checkAccessImplHelper<throw_if_denied, true, wildcard>(context, element);
+    return checkAccessImplHelper<throw_if_denied, grant_option, wildcard>(context, element);
 }

 template <bool throw_if_denied, bool grant_option, bool wildcard>
 bool ContextAccess::checkAccessImpl(const ContextPtr & context, const AccessRightsElements & elements) const

@@ -1,18 +1,17 @@
 #pragma once

-#include <Access/AccessRights.h>
-#include <Access/ContextAccessParams.h>
-#include <Access/EnabledRowPolicies.h>
-#include <Interpreters/ClientInfo.h>
-#include <Access/QuotaUsage.h>
-#include <Common/SettingsChanges.h>
-#include <Core/UUID.h>
-#include <base/scope_guard.h>
-#include <boost/container/flat_set.hpp>
 #include <memory>
 #include <mutex>
 #include <optional>
 #include <unordered_map>
+#include <Access/AccessRights.h>
+#include <Access/ContextAccessParams.h>
+#include <Access/EnabledRowPolicies.h>
+#include <Access/QuotaUsage.h>
+#include <Core/UUID.h>
+#include <Interpreters/ClientInfo.h>
+#include <base/scope_guard.h>
+#include <Common/SettingsChanges.h>

 namespace Poco { class Logger; }

@@ -133,6 +132,8 @@ public:
     /// Checks if grantees are allowed for the current user, throws an exception if not.
     void checkGranteesAreAllowed(const std::vector<UUID> & grantee_ids) const;

+    static AccessRights addImplicitAccessRights(const AccessRights & access, const AccessControl & access_control);
+
     ContextAccess(const AccessControl & access_control_, const Params & params_);
     ~ContextAccess();

@@ -89,9 +89,8 @@ bool operator ==(const ContextAccessParams & left, const ContextAccessParams & r
     {
         if (!x)
             return !y;
-        else if (!y)
+        if (!y)
             return false;
-        else
-            return *x == *y;
+        return *x == *y;
     }
     else

@@ -132,22 +131,20 @@ bool operator <(const ContextAccessParams & left, const ContextAccessParams & ri
     {
         if (!x)
             return y ? -1 : 0;
-        else if (!y)
+        if (!y)
             return 1;
-        else if (*x == *y)
+        if (*x == *y)
             return 0;
-        else if (*x < *y)
+        if (*x < *y)
             return -1;
-        else
-            return 1;
+        return 1;
     }
     else
     {
         if (x == y)
             return 0;
-        else if (x < y)
+        if (x < y)
             return -1;
-        else
-            return 1;
+        return 1;
     }
 };

@@ -1,9 +1,8 @@
 #pragma once

-#include <base/types.h>
-#include <boost/container/flat_set.hpp>
-#include <Access/Common/SSLCertificateSubjects.h>
 #include <memory>
+#include <Access/Common/SSLCertificateSubjects.h>
+#include <base/types.h>

 #include "config.h"

@@ -166,7 +166,6 @@ void KerberosInit::init(const String & keytab_file, const String & principal, co
         ret = krb5_get_init_creds_keytab(k5.ctx, &my_creds, k5.me, keytab, 0, nullptr, options);
         if (ret)
             throw Exception(ErrorCodes::KERBEROS_ERROR, "Error in getting initial credentials: {}", fmtError(ret));
-        else
-            LOG_TRACE(log, "Got initial credentials");
+        LOG_TRACE(log, "Got initial credentials");
     }
     else

@@ -330,10 +330,7 @@ std::set<String> LDAPAccessStorage::mapExternalRolesNoLock(const LDAPClient::Sea

         for (const auto & external_role : external_role_set)
         {
-            if (
-                prefix.size() < external_role.size() &&
-                external_role.compare(0, prefix.size(), prefix) == 0
-            )
+            if (prefix.size() < external_role.size() && external_role.starts_with(prefix))
             {
                 role_names.emplace(external_role, prefix.size());
             }
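Illustrative note (not part of the commit): the LDAP hunk above replaces the spelled-out compare(0, prefix.size(), prefix) == 0 with C++20 std::string::starts_with, which is equivalent for this use. A small self-contained check of the equivalence; requires -std=c++20 or later:

#include <cassert>
#include <string>

int main()
{
    std::string role = "clickhouse_admins";
    std::string prefix = "clickhouse_";

    // Old and new spellings agree for any inputs.
    assert((role.compare(0, prefix.size(), prefix) == 0) == role.starts_with(prefix));

    // The extra `prefix.size() < external_role.size()` guard in the hunk keeps only
    // roles that still have characters after the prefix (a non-empty mapped name).
    assert(prefix.size() < role.size() && role.starts_with(prefix));
}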
@@ -62,7 +62,6 @@ String QuotaCache::QuotaInfo::calculateKey(const EnabledQuota & enabled, bool th
                     "Quota {} (for user {}) requires a client supplied key.",
                     quota->getName(),
                     params.user_name);
-            else
-                return ""; // Authentication quota has no client key at time of authentication.
+            return ""; // Authentication quota has no client key at time of authentication.
         }
         case QuotaKeyType::CLIENT_KEY_OR_USER_NAME:

@@ -1,10 +1,9 @@
 #pragma once

-#include <Access/EnabledRoles.h>
-#include <Poco/AccessExpireCache.h>
-#include <boost/container/flat_set.hpp>
 #include <map>
 #include <mutex>
+#include <Access/EnabledRoles.h>
+#include <Poco/AccessExpireCache.h>


 namespace DB

@@ -80,15 +80,13 @@ void RolesOrUsersSet::init(const ASTRolesOrUsersSet & ast, const AccessControl *
                 return *id;
             return access_control->getID<Role>(name);
         }
-        else if (ast.allow_users)
+        if (ast.allow_users)
         {
             return access_control->getID<User>(name);
         }
-        else
-        {
-            assert(ast.allow_roles);
-            return access_control->getID<Role>(name);
-        }
+
+        assert(ast.allow_roles);
+        return access_control->getID<Role>(name);
     };

     if (!ast.names.empty() && !all)

@@ -28,7 +28,7 @@ SettingsAuthResponseParser::parse(const Poco::Net::HTTPResponse & response, std:
     try
     {
         Poco::Dynamic::Var json = parser.parse(*body_stream);
-        Poco::JSON::Object::Ptr obj = json.extract<Poco::JSON::Object::Ptr>();
+        const Poco::JSON::Object::Ptr & obj = json.extract<Poco::JSON::Object::Ptr>();
         Poco::JSON::Object::Ptr settings_obj = obj->getObject(settings_key);

         if (settings_obj)
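Illustrative note (not part of the commit): the last hunk binds the extracted JSON object handle to a const reference instead of copying it. For reference-counted handles (Poco::SharedPtr there, std::shared_ptr in the sketch below), a copy performs a reference-count update that the reference avoids. A generic illustration, assuming std::shared_ptr semantics:

#include <memory>

struct Payload { int value = 42; };

void demo(const std::shared_ptr<Payload> & source)
{
    std::shared_ptr<Payload> copy = source;          // new owner: refcount incremented
    const std::shared_ptr<Payload> & alias = source; // no new owner: no refcount traffic
    (void)copy->value;
    (void)alias->value;
}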
@@ -54,7 +54,6 @@ SettingSourceRestrictions getSettingSourceRestrictions(std::string_view name)
     auto settingConstraintIter = SETTINGS_SOURCE_RESTRICTIONS.find(name);
     if (settingConstraintIter != SETTINGS_SOURCE_RESTRICTIONS.end())
         return settingConstraintIter->second;
-    else
-        return SettingSourceRestrictions(); // allows everything
+    return SettingSourceRestrictions(); // allows everything
 }

@@ -310,7 +309,6 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
     {
         if (reaction == THROW_ON_VIOLATION)
             throw Exception(explain, code);
-        else
-            return false;
+        return false;
     }

@@ -335,7 +333,6 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
     {
         if (reaction == THROW_ON_VIOLATION)
             throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} should not be changed", setting_name);
-        else
-            return false;
+        return false;
     }

@@ -351,7 +348,6 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
                 max_value,
                 min_value,
                 setting_name);
-        else
-            return false;
+        return false;
     }

@@ -362,7 +358,6 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
             throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} shouldn't be less than {}",
                 setting_name, applyVisitor(FieldVisitorToString(), min_value));
         }
-        else
-            change.value = min_value;
+        change.value = min_value;
     }

@@ -373,7 +368,6 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
             throw Exception(ErrorCodes::SETTING_CONSTRAINT_VIOLATION, "Setting {} shouldn't be greater than {}",
                 setting_name, applyVisitor(FieldVisitorToString(), max_value));
         }
-        else
-            change.value = max_value;
+        change.value = max_value;
     }

@@ -381,7 +375,6 @@ bool SettingsConstraints::Checker::check(SettingChange & change,
     {
         if (reaction == THROW_ON_VIOLATION)
             throw Exception(ErrorCodes::READONLY, "Setting {} is not allowed to be set by {}", setting_name, toString(source));
-        else
-            return false;
+        return false;
     }

@@ -431,8 +424,8 @@ SettingsConstraints::Checker SettingsConstraints::getMergeTreeChecker(std::strin
     auto full_name = settingFullName<MergeTreeSettings>(short_name);
     auto it = constraints.find(resolveSettingNameWithCache(full_name));
     if (it == constraints.end())
-        return Checker(MergeTreeSettings::Traits::resolveName); // Allowed
-    return Checker(it->second, MergeTreeSettings::Traits::resolveName);
+        return Checker(MergeTreeSettings::resolveName); // Allowed
+    return Checker(it->second, MergeTreeSettings::resolveName);
 }

 bool SettingsConstraints::Constraint::operator==(const Constraint & other) const

@@ -28,6 +28,7 @@
 #include <cstring>
 #include <filesystem>
 #include <base/FnTraits.h>
+#include <base/range.h>


 namespace DB

@@ -648,7 +649,6 @@ namespace
     {
         if (users_without_row_policies_can_read_rows)
             continue;
-        else
-            filter = "1";
+        filter = "1";
     }

@@ -157,13 +157,13 @@ public:
         d.status = static_cast<Data::Status>(k);
         if (d.status == Data::Status::NotSet)
             return;
-        else if (d.status == Data::Status::SetNull)
+        if (d.status == Data::Status::SetNull)
         {
             if (!returns_nullable_type)
                 throw Exception(ErrorCodes::INCORRECT_DATA, "Incorrect type (NULL) in non-nullable {}State", getName());
             return;
         }
-        else if (d.status == Data::Status::SetOther)
+        if (d.status == Data::Status::SetOther)
         {
             serialization->deserializeBinary(d.value, buf, {});
             return;

@@ -148,9 +148,8 @@ AggregateFunctionPtr createAggregateFunctionDeltaSum(
     if (isInteger(data_type) || isFloat(data_type))
         return AggregateFunctionPtr(createWithNumericType<AggregationFunctionDeltaSum>(
             *data_type, arguments, params));
-    else
-        throw Exception(ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}",
-            arguments[0]->getName(), name);
+    throw Exception(
+        ErrorCodes::ILLEGAL_TYPE_OF_ARGUMENT, "Illegal type {} of argument for aggregate function {}", arguments[0]->getName(), name);
 }
 }

@@ -137,13 +137,12 @@ AggregateFunctionFactory::getAssociatedFunctionByNullsAction(const String & name
 {
     if (action == NullsAction::RESPECT_NULLS)
     {
-        if (auto it = respect_nulls.find(name); it == respect_nulls.end())
+        auto it = respect_nulls.find(name);
+        if (it == respect_nulls.end())
             throw Exception(ErrorCodes::NOT_IMPLEMENTED, "Function {} does not support RESPECT NULLS", name);
-        else if (auto associated_it = aggregate_functions.find(it->second); associated_it != aggregate_functions.end())
+        if (auto associated_it = aggregate_functions.find(it->second); associated_it != aggregate_functions.end())
             return {associated_it->second};
-        else
-            throw Exception(
-                ErrorCodes::LOGICAL_ERROR, "Unable to find the function {} (equivalent to '{} RESPECT NULLS')", it->second, name);
+        throw Exception(ErrorCodes::LOGICAL_ERROR, "Unable to find the function {} (equivalent to '{} RESPECT NULLS')", it->second, name);
     }

     if (action == NullsAction::IGNORE_NULLS)
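Illustrative sketch (not part of the commit): the RESPECT NULLS hunk hoists the iterator out of the if initializer. A variable declared in an if-init statement (if (auto it = ...; cond)) is scoped to that if/else chain, so once the else branches become independent statements, it must be declared beforehand to stay visible. A minimal standalone illustration:

#include <map>
#include <stdexcept>
#include <string>

int lookup(const std::map<std::string, int> & m, const std::string & key)
{
    // With `if (auto it = m.find(key); it == m.end()) ... else return it->second;`
    // `it` dies at the end of the chain. After the `else` is removed, the later
    // statement still needs `it`, so the declaration moves out of the initializer:
    auto it = m.find(key);
    if (it == m.end())
        throw std::out_of_range(key);
    return it->second;
}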
@@ -152,7 +151,6 @@ AggregateFunctionFactory::getAssociatedFunctionByNullsAction(const String & name
     {
         if (auto associated_it = aggregate_functions.find(it->second); associated_it != aggregate_functions.end())
             return {associated_it->second};
-        else
-            throw Exception(
-                ErrorCodes::LOGICAL_ERROR, "Unable to find the function {} (equivalent to '{} IGNORE NULLS')", it->second, name);
+        throw Exception(
+            ErrorCodes::LOGICAL_ERROR, "Unable to find the function {} (equivalent to '{} IGNORE NULLS')", it->second, name);
     }

@@ -263,7 +261,6 @@ AggregateFunctionPtr AggregateFunctionFactory::getImpl(
     if (!hints.empty())
         throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION,
             "Unknown aggregate function {}{}. Maybe you meant: {}", name, extra_info, toString(hints));
-    else
-        throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, "Unknown aggregate function {}{}", name, extra_info);
+    throw Exception(ErrorCodes::UNKNOWN_AGGREGATE_FUNCTION, "Unknown aggregate function {}{}", name, extra_info);
 }

@@ -328,8 +328,7 @@ struct AggregateFunctionFlameGraphData
                 list = list->next;
                 return entry;
             }
-            else
-            {
+
             Entry * parent = list;
             while (parent->next && parent->next->size != size)
                 parent = parent->next;

@@ -343,7 +342,6 @@ struct AggregateFunctionFlameGraphData

             return nullptr;
         }
-        }

         void add(UInt64 ptr, Int64 size, const UInt64 * stack, size_t stack_size, Arena * arena)
         {

@@ -90,7 +90,6 @@ struct GroupArraySamplerData
         /// With a large number of values, we will generate random numbers several times slower.
         if (lim <= static_cast<UInt64>(pcg32_fast::max()))
             return rng() % lim;
-        else
-            return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
+        return (static_cast<UInt64>(rng()) * (static_cast<UInt64>(pcg32::max()) + 1ULL) + static_cast<UInt64>(rng())) % lim;
     }
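Illustrative sketch (not part of the commit): the sampler above widens two 32-bit pcg32 draws into one 64-bit value, hi * (pcg32::max() + 1) + lo, before reducing modulo lim, since a single 32-bit draw can only cover limits up to 2^32. The same construction with the standard library (modulo bias is ignored here, as in the original):

#include <cstdint>
#include <random>

// hi * 2^32 + lo is uniform over [0, 2^64) when hi and lo are
// independent uniform draws over [0, 2^32).
uint64_t rand64(std::mt19937 & rng32)
{
    uint64_t hi = rng32();
    uint64_t lo = rng32();
    return hi * 0x100000000ULL + lo;
}

uint64_t rand_below(std::mt19937 & rng32, uint64_t lim)
{
    if (lim <= 0xFFFFFFFFULL)
        return rng32() % lim;    // one 32-bit draw suffices
    return rand64(rng32) % lim;  // otherwise widen first
}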
@ -797,8 +796,8 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
|
|||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "groupArrayLast make sense only with max_elems (groupArrayLast(max_elems)())");
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "groupArrayLast make sense only with max_elems (groupArrayLast(max_elems)())");
|
||||||
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ false, Tlast, /* Tsampler= */ Sampler::NONE>>(argument_types[0], parameters, max_elems, std::nullopt);
|
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ false, Tlast, /* Tsampler= */ Sampler::NONE>>(argument_types[0], parameters, max_elems, std::nullopt);
|
||||||
}
|
}
|
||||||
else
|
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ true, Tlast, /* Tsampler= */ Sampler::NONE>>(
|
||||||
return createAggregateFunctionGroupArrayImpl<GroupArrayTrait</* Thas_limit= */ true, Tlast, /* Tsampler= */ Sampler::NONE>>(argument_types[0], parameters, max_elems, std::nullopt);
|
argument_types[0], parameters, max_elems, std::nullopt);
|
||||||
}
|
}
|
||||||
|
|
||||||
AggregateFunctionPtr createAggregateFunctionGroupArraySample(
|
AggregateFunctionPtr createAggregateFunctionGroupArraySample(
|
||||||
|
@ -381,24 +381,23 @@ IAggregateFunction * createWithExtraTypes(const DataTypePtr & argument_type, con
|
|||||||
{
|
{
|
||||||
WhichDataType which(argument_type);
|
WhichDataType which(argument_type);
|
||||||
if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupArrayIntersectDate(argument_type, parameters);
|
if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupArrayIntersectDate(argument_type, parameters);
|
||||||
else if (which.idx == TypeIndex::DateTime) return new AggregateFunctionGroupArrayIntersectDateTime(argument_type, parameters);
|
if (which.idx == TypeIndex::DateTime)
|
||||||
else if (which.idx == TypeIndex::Date32) return new AggregateFunctionGroupArrayIntersectDate32(argument_type, parameters);
|
return new AggregateFunctionGroupArrayIntersectDateTime(argument_type, parameters);
|
||||||
else if (which.idx == TypeIndex::DateTime64)
|
if (which.idx == TypeIndex::Date32)
|
||||||
|
return new AggregateFunctionGroupArrayIntersectDate32(argument_type, parameters);
|
||||||
|
if (which.idx == TypeIndex::DateTime64)
|
||||||
{
|
{
|
||||||
const auto * datetime64_type = dynamic_cast<const DataTypeDateTime64 *>(argument_type.get());
|
const auto * datetime64_type = dynamic_cast<const DataTypeDateTime64 *>(argument_type.get());
|
||||||
const auto return_type = std::make_shared<DataTypeArray>(std::make_shared<DataTypeDateTime64>(datetime64_type->getScale()));
|
const auto return_type = std::make_shared<DataTypeArray>(std::make_shared<DataTypeDateTime64>(datetime64_type->getScale()));
|
||||||
|
|
||||||
return new AggregateFunctionGroupArrayIntersectGeneric<true>(argument_type, parameters, return_type);
|
return new AggregateFunctionGroupArrayIntersectGeneric<true>(argument_type, parameters, return_type);
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
/// Check that we can use plain version of AggregateFunctionGroupArrayIntersectGeneric
|
/// Check that we can use plain version of AggregateFunctionGroupArrayIntersectGeneric
|
||||||
if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
|
if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
|
||||||
return new AggregateFunctionGroupArrayIntersectGeneric<true>(argument_type, parameters);
|
return new AggregateFunctionGroupArrayIntersectGeneric<true>(argument_type, parameters);
|
||||||
else
|
|
||||||
return new AggregateFunctionGroupArrayIntersectGeneric<false>(argument_type, parameters);
|
return new AggregateFunctionGroupArrayIntersectGeneric<false>(argument_type, parameters);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
inline AggregateFunctionPtr createAggregateFunctionGroupArrayIntersectImpl(const std::string & name, const DataTypePtr & argument_type, const Array & parameters)
|
inline AggregateFunctionPtr createAggregateFunctionGroupArrayIntersectImpl(const std::string & name, const DataTypePtr & argument_type, const Array & parameters)
|
||||||
{
|
{
|
||||||
|
@ -65,7 +65,6 @@ struct MovingSumData : public MovingData<T>
|
|||||||
{
|
{
|
||||||
if (idx < window_size)
|
if (idx < window_size)
|
||||||
return this->value[idx];
|
return this->value[idx];
|
||||||
else
|
|
||||||
return this->value[idx] - this->value[idx - window_size];
|
return this->value[idx] - this->value[idx - window_size];
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -79,7 +78,6 @@ struct MovingAvgData : public MovingData<T>
|
|||||||
{
|
{
|
||||||
if (idx < window_size)
|
if (idx < window_size)
|
||||||
return this->value[idx] / T(window_size);
|
return this->value[idx] / T(window_size);
|
||||||
else
|
|
||||||
return (this->value[idx] - this->value[idx - window_size]) / T(window_size);
|
return (this->value[idx] - this->value[idx - window_size]) / T(window_size);
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
@ -285,17 +283,13 @@ AggregateFunctionPtr createAggregateFunctionMoving(
|
|||||||
{
|
{
|
||||||
if (isDecimal(argument_type))
|
if (isDecimal(argument_type))
|
||||||
return createAggregateFunctionMovingImpl<Function, std::false_type, std::true_type>(name, argument_type);
|
return createAggregateFunctionMovingImpl<Function, std::false_type, std::true_type>(name, argument_type);
|
||||||
else
|
|
||||||
return createAggregateFunctionMovingImpl<Function, std::false_type, std::false_type>(name, argument_type);
|
return createAggregateFunctionMovingImpl<Function, std::false_type, std::false_type>(name, argument_type);
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
if (isDecimal(argument_type))
|
if (isDecimal(argument_type))
|
||||||
return createAggregateFunctionMovingImpl<Function, std::true_type, std::true_type>(name, argument_type, max_elems);
|
return createAggregateFunctionMovingImpl<Function, std::true_type, std::true_type>(name, argument_type, max_elems);
|
||||||
else
|
|
||||||
return createAggregateFunctionMovingImpl<Function, std::true_type, std::false_type>(name, argument_type, max_elems);
|
return createAggregateFunctionMovingImpl<Function, std::true_type, std::false_type>(name, argument_type, max_elems);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -391,21 +391,20 @@ AggregateFunctionPtr createAggregateFunctionGroupArray(
|
|||||||
{
|
{
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should have limit argument", name);
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should have limit argument", name);
|
||||||
}
|
}
|
||||||
else if (parameters.size() == 1)
|
if (parameters.size() == 1)
|
||||||
{
|
{
|
||||||
auto type = parameters[0].getType();
|
auto type = parameters[0].getType();
|
||||||
if (type != Field::Types::Int64 && type != Field::Types::UInt64)
|
if (type != Field::Types::Int64 && type != Field::Types::UInt64)
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name);
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name);
|
||||||
|
|
||||||
if ((type == Field::Types::Int64 && parameters[0].safeGet<Int64>() < 0) ||
|
if ((type == Field::Types::Int64 && parameters[0].safeGet<Int64>() < 0)
|
||||||
(type == Field::Types::UInt64 && parameters[0].safeGet<UInt64>() == 0))
|
|| (type == Field::Types::UInt64 && parameters[0].safeGet<UInt64>() == 0))
|
||||||
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name);
|
throw Exception(ErrorCodes::BAD_ARGUMENTS, "Parameter for aggregate function {} should be positive number", name);
|
||||||
|
|
||||||
max_elems = parameters[0].safeGet<UInt64>();
|
max_elems = parameters[0].safeGet<UInt64>();
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH,
|
throw Exception(ErrorCodes::NUMBER_OF_ARGUMENTS_DOESNT_MATCH, "Function {} does not support this number of arguments", name);
|
||||||
"Function {} does not support this number of arguments", name);
|
|
||||||
|
|
||||||
if (max_elems > group_array_sorted_sort_strategy_max_elements_threshold)
|
if (max_elems > group_array_sorted_sort_strategy_max_elements_threshold)
|
||||||
return createAggregateFunctionGroupArraySortedImpl<GroupArraySortedSort>(argument_types[0], parameters, max_elems);
|
return createAggregateFunctionGroupArraySortedImpl<GroupArraySortedSort>(argument_types[0], parameters, max_elems);
|
||||||
|
@ -133,7 +133,6 @@ public:
|
|||||||
{
|
{
|
||||||
if (revision >= STATE_VERSION_1_MIN_REVISION)
|
if (revision >= STATE_VERSION_1_MIN_REVISION)
|
||||||
return 1;
|
return 1;
|
||||||
else
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -88,7 +88,6 @@ public:
|
|||||||
{
|
{
|
||||||
if (isSmall())
|
if (isSmall())
|
||||||
return small.size();
|
return small.size();
|
||||||
else
|
|
||||||
return roaring_bitmap->cardinality();
|
return roaring_bitmap->cardinality();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -454,7 +453,6 @@ public:
|
|||||||
|
|
||||||
if (isSmall())
|
if (isSmall())
|
||||||
return small.find(static_cast<T>(x)) != small.end();
|
return small.find(static_cast<T>(x)) != small.end();
|
||||||
else
|
|
||||||
return roaring_bitmap->contains(static_cast<Value>(x));
|
return roaring_bitmap->contains(static_cast<Value>(x));
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -554,8 +552,7 @@ public:
|
|||||||
r1.add(elem);
|
r1.add(elem);
|
||||||
return answer.size();
|
return answer.size();
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
UInt64 count = 0;
|
UInt64 count = 0;
|
||||||
for (auto it = roaring_bitmap->begin(); it != roaring_bitmap->end(); ++it)
|
for (auto it = roaring_bitmap->begin(); it != roaring_bitmap->end(); ++it)
|
||||||
{
|
{
|
||||||
@ -572,7 +569,6 @@ public:
|
|||||||
}
|
}
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
UInt64 rb_offset_limit(UInt64 offset, UInt64 limit, RoaringBitmapWithSmallSet & r1) const /// NOLINT
|
UInt64 rb_offset_limit(UInt64 offset, UInt64 limit, RoaringBitmapWithSmallSet & r1) const /// NOLINT
|
||||||
{
|
{
|
||||||
@ -591,8 +587,7 @@ public:
|
|||||||
r1.add(it->getValue());
|
r1.add(it->getValue());
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
{
|
|
||||||
UInt64 count = 0;
|
UInt64 count = 0;
|
||||||
UInt64 offset_count = 0;
|
UInt64 offset_count = 0;
|
||||||
auto it = roaring_bitmap->begin();
|
auto it = roaring_bitmap->begin();
|
||||||
@ -603,7 +598,6 @@ public:
|
|||||||
r1.add(*it);
|
r1.add(*it);
|
||||||
return count;
|
return count;
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
UInt64 rb_min() const /// NOLINT
|
UInt64 rb_min() const /// NOLINT
|
||||||
{
|
{
|
||||||
@ -620,7 +614,6 @@ public:
|
|||||||
}
|
}
|
||||||
return min_val;
|
return min_val;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
return roaring_bitmap->minimum();
|
return roaring_bitmap->minimum();
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -639,7 +632,6 @@ public:
|
|||||||
}
|
}
|
||||||
return max_val;
|
return max_val;
|
||||||
}
|
}
|
||||||
else
|
|
||||||
return roaring_bitmap->maximum();
|
return roaring_bitmap->maximum();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -275,7 +275,6 @@ AggregateFunctionPtr createAggregateFunctionGroupConcat(
|
|||||||
|
|
||||||
if (has_limit)
|
if (has_limit)
|
||||||
return std::make_shared<GroupConcatImpl</* has_limit= */ true>>(argument_types[0], parameters, limit, delimiter);
|
return std::make_shared<GroupConcatImpl</* has_limit= */ true>>(argument_types[0], parameters, limit, delimiter);
|
||||||
else
|
|
||||||
return std::make_shared<GroupConcatImpl</* has_limit= */ false>>(argument_types[0], parameters, limit, delimiter);
|
return std::make_shared<GroupConcatImpl</* has_limit= */ false>>(argument_types[0], parameters, limit, delimiter);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -276,17 +276,16 @@ IAggregateFunction * createWithExtraTypes(const DataTypePtr & argument_type, TAr
|
|||||||
{
|
{
|
||||||
WhichDataType which(argument_type);
|
WhichDataType which(argument_type);
|
||||||
if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupUniqArrayDate<HasLimit>(argument_type, args...);
|
if (which.idx == TypeIndex::Date) return new AggregateFunctionGroupUniqArrayDate<HasLimit>(argument_type, args...);
|
||||||
else if (which.idx == TypeIndex::DateTime) return new AggregateFunctionGroupUniqArrayDateTime<HasLimit>(argument_type, args...);
|
if (which.idx == TypeIndex::DateTime)
|
||||||
else if (which.idx == TypeIndex::IPv4) return new AggregateFunctionGroupUniqArrayIPv4<HasLimit>(argument_type, args...);
|
return new AggregateFunctionGroupUniqArrayDateTime<HasLimit>(argument_type, args...);
|
||||||
else
|
if (which.idx == TypeIndex::IPv4)
|
||||||
{
|
return new AggregateFunctionGroupUniqArrayIPv4<HasLimit>(argument_type, args...);
|
||||||
|
|
||||||
/// Check that we can use plain version of AggregateFunctionGroupUniqArrayGeneric
|
/// Check that we can use plain version of AggregateFunctionGroupUniqArrayGeneric
|
||||||
if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
|
if (argument_type->isValueUnambiguouslyRepresentedInContiguousMemoryRegion())
|
||||||
return new AggregateFunctionGroupUniqArrayGeneric<true, HasLimit>(argument_type, args...);
|
return new AggregateFunctionGroupUniqArrayGeneric<true, HasLimit>(argument_type, args...);
|
||||||
else
|
|
||||||
return new AggregateFunctionGroupUniqArrayGeneric<false, HasLimit>(argument_type, args...);
|
return new AggregateFunctionGroupUniqArrayGeneric<false, HasLimit>(argument_type, args...);
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
template <typename HasLimit, typename ... TArgs>
|
template <typename HasLimit, typename ... TArgs>
|
||||||
inline AggregateFunctionPtr createAggregateFunctionGroupUniqArrayImpl(const std::string & name, const DataTypePtr & argument_type, TArgs ... args)
|
inline AggregateFunctionPtr createAggregateFunctionGroupUniqArrayImpl(const std::string & name, const DataTypePtr & argument_type, TArgs ... args)
|
||||||
@ -336,7 +335,6 @@ AggregateFunctionPtr createAggregateFunctionGroupUniqArray(
|
|||||||
|
|
||||||
if (!limit_size)
|
if (!limit_size)
|
||||||
return createAggregateFunctionGroupUniqArrayImpl<std::false_type>(name, argument_types[0], parameters);
|
return createAggregateFunctionGroupUniqArrayImpl<std::false_type>(name, argument_types[0], parameters);
|
||||||
else
|
|
||||||
return createAggregateFunctionGroupUniqArrayImpl<std::true_type>(name, argument_types[0], parameters, max_elems);
|
return createAggregateFunctionGroupUniqArrayImpl<std::true_type>(name, argument_types[0], parameters, max_elems);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -87,7 +87,6 @@ public:
|
|||||||
{
|
{
|
||||||
if (kind_ == AggregateFunctionIntersectionsKind::Count)
|
if (kind_ == AggregateFunctionIntersectionsKind::Count)
|
||||||
return std::make_shared<DataTypeUInt64>();
|
return std::make_shared<DataTypeUInt64>();
|
||||||
else
|
|
||||||
return std::make_shared<DataTypeNumber<PointType>>();
|
return std::make_shared<DataTypeNumber<PointType>>();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -138,7 +138,9 @@ public:
|
|||||||
{
|
{
|
||||||
if (other.count == 0)
|
if (other.count == 0)
|
||||||
return;
|
return;
|
||||||
else if (count == 0)
|
|
||||||
|
/// NOLINTBEGIN(readability-else-after-return)
|
||||||
|
if (count == 0)
|
||||||
{
|
{
|
||||||
compress_threshold = other.compress_threshold;
|
compress_threshold = other.compress_threshold;
|
||||||
relative_error = other.relative_error;
|
relative_error = other.relative_error;
|
||||||
@ -237,6 +239,7 @@ public:
|
|||||||
doCompress(2 * merged_relative_error * merged_count);
|
doCompress(2 * merged_relative_error * merged_count);
|
||||||
compressed = true;
|
compressed = true;
|
||||||
}
|
}
|
||||||
|
/// NOLINTEND(readability-else-after-return)
|
||||||
}
|
}
|
||||||
|
|
||||||
void write(WriteBuffer & buf) const
|
void write(WriteBuffer & buf) const
|
||||||
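Illustrative note (not part of the commit): NOLINTBEGIN(check) / NOLINTEND(check) are clang-tidy's bulk suppression comments; every line between them is exempt from the named check, which is how the hunk above keeps the original else-after-return structure in this one merge routine while the check stays enforced elsewhere. Minimal form:

// NOLINTBEGIN(readability-else-after-return)
int sign(int x)
{
    if (x < 0)
        return -1;
    else if (x > 0)  // deliberately kept: the check is suppressed in this region
        return 1;
    else
        return 0;
}
// NOLINTEND(readability-else-after-return)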
@@ -292,13 +295,11 @@ private:
             Int64 max_rank = min_rank + curr_sample.delta;
             if (max_rank - target_error <= rank && rank <= min_rank + target_error)
                 return {i, min_rank, curr_sample.value};
-            else
-            {
-                ++i;
-                curr_sample = sampled[i];
-                min_rank += curr_sample.g;
-            }
+
+            ++i;
+            curr_sample = sampled[i];
+            min_rank += curr_sample.g;
         }
         return {sampled.size() - 1, 0, sampled.back().value};
     }