ClickHouse/ClickHouse commit 56fe7206d0: Merge branch 'master' into docs_globalVariable
@@ -16,3 +16,6 @@
 # Applied Black formatter for Python code
 e6f5a3f98b21ba99cf274a9833797889e020a2b3
 
+
+# Enabling clang-tidy readability-else-no-return rule
+67c1e89d90ef576e62f8b1c68269742a3c6f9b1e
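
The second entry records commit 67c1e89d90ef, which enabled the clang-tidy readability-else-no-return rule; most of the C++ hunks below are mechanical applications of it. As a hedged illustration (my example, not code from the repository), the rule rewrites an `else` branch that follows a returning `if` into a plain early return:

// Before: clang-tidy flags the redundant `else` after `return`.
int sign_before(double x)
{
    if (x < 0)
        return -1;
    else
        return 1;
}

// After: identical behavior, one less nesting level.
int sign_after(double x)
{
    if (x < 0)
        return -1;
    return 1;
}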
.github/ISSUE_TEMPLATE/20_feature-request.md | 2
@@ -15,7 +15,7 @@ assignees: ''
 
 **Use case**
 
-> A clear and concise description of what is the intended usage scenario is.
+> A clear and concise description of what the intended usage scenario is.
 
 **Describe the solution you'd like**
.github/actionlint.yml | 1
@@ -4,7 +4,6 @@ self-hosted-runner:
   - func-tester
   - func-tester-aarch64
   - fuzzer-unit-tester
-  - stress-tester
   - style-checker
   - style-checker-aarch64
   - release-maker
.github/workflows/backport_branches.yml | 21
@@ -229,18 +229,26 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (tsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
 #############################################################################################
 ############################# INTEGRATION TESTS #############################################
 #############################################################################################
-  IntegrationTestsRelease:
-    needs: [RunConfig, BuilderDebRelease]
+  IntegrationTestsAsanOldAnalyzer:
+    needs: [RunConfig, BuilderDebAsan]
     if: ${{ !failure() && !cancelled() }}
     uses: ./.github/workflows/reusable_test.yml
     with:
-      test_name: Integration tests (release)
-      runner_type: stress-tester
+      test_name: Integration tests (asan, old analyzer)
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
+  IntegrationTestsTsan:
+    needs: [RunConfig, BuilderDebTsan]
+    if: ${{ !failure() && !cancelled() }}
+    uses: ./.github/workflows/reusable_test.yml
+    with:
+      test_name: Integration tests (tsan)
+      runner_type: func-tester
+      data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
     if: ${{ !cancelled() }}
@@ -250,7 +258,8 @@ jobs:
       - FunctionalStatelessTestAsan
       - FunctionalStatefulTestDebug
       - StressTestTsan
-      - IntegrationTestsRelease
+      - IntegrationTestsTsan
+      - IntegrationTestsAsanOldAnalyzer
       - CompatibilityCheckX86
       - CompatibilityCheckAarch64
     runs-on: [self-hosted, style-checker]
.github/workflows/pull_request.yml | 3
@@ -33,9 +33,6 @@ jobs:
           filter: tree:0
       - name: Debug Info
         uses: ./.github/actions/debug
-      - name: Cancel previous Sync PR workflow
-        run: |
-          python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --cancel-previous-run
       - name: Set pending Sync status
         run: |
           python3 "$GITHUB_WORKSPACE/tests/ci/ci.py" --set-pending-status
.github/workflows/release_branches.yml | 18
@@ -374,7 +374,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (asan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestTsan:
     needs: [RunConfig, BuilderDebTsan]
@@ -382,7 +382,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (tsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestMsan:
     needs: [RunConfig, BuilderDebMsan]
@@ -390,7 +390,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (msan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestUBsan:
     needs: [RunConfig, BuilderDebUBsan]
@@ -398,7 +398,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (ubsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   StressTestDebug:
     needs: [RunConfig, BuilderDebDebug]
@@ -406,7 +406,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Stress test (debug)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
 #############################################################################################
 ############################# INTEGRATION TESTS #############################################
@@ -417,7 +417,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (asan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsAnalyzerAsan:
     needs: [RunConfig, BuilderDebAsan]
@@ -425,7 +425,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (asan, old analyzer)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsTsan:
     needs: [RunConfig, BuilderDebTsan]
@@ -433,7 +433,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (tsan)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   IntegrationTestsRelease:
     needs: [RunConfig, BuilderDebRelease]
@@ -441,7 +441,7 @@ jobs:
     uses: ./.github/workflows/reusable_test.yml
     with:
       test_name: Integration tests (release)
-      runner_type: stress-tester
+      runner_type: func-tester
       data: ${{ needs.RunConfig.outputs.data }}
   FinishCheck:
     if: ${{ !cancelled() }}
@@ -339,7 +339,6 @@ set (CMAKE_ASM_FLAGS_RELWITHDEBINFO "${CMAKE_ASM_FLAGS_RELWITHDEBINFO} -O3
 set (CMAKE_ASM_FLAGS_DEBUG "${CMAKE_ASM_FLAGS_DEBUG} -O${DEBUG_O_LEVEL} ${DEBUG_INFO_FLAGS} ${CMAKE_ASM_FLAGS_ADD}")
 
 if (OS_DARWIN)
-    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++")
     set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-U,_inside_main")
 endif()
@@ -110,8 +110,7 @@ struct DecomposedFloat
     {
         if (!isNegative())
             return rhs > 0 ? -1 : 1;
-        else
-            return rhs >= 0 ? -1 : 1;
+        return rhs >= 0 ? -1 : 1;
     }
 
     /// The case of the most negative integer
@@ -128,8 +127,7 @@ struct DecomposedFloat
 
         if (mantissa() == 0)
             return 0;
-        else
-            return -1;
+        return -1;
     }
 }
@@ -169,9 +167,8 @@ struct DecomposedFloat
         /// Float has no fractional part means that the numbers are equal.
         if (large_and_always_integer || (mantissa() & ((1ULL << (Traits::mantissa_bits - normalizedExponent())) - 1)) == 0)
             return 0;
-        else
-            /// Float has fractional part means its abs value is larger.
-            return isNegative() ? -1 : 1;
+        /// Float has fractional part means its abs value is larger.
+        return isNegative() ? -1 : 1;
     }
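
The DecomposedFloat hunks above only drop redundant `else` branches; the comparison logic is unchanged. For context, the class compares floats against wide integers exactly by inspecting sign, exponent, and mantissa, because converting either operand first can silently round. A self-contained demonstration of the pitfall it avoids (my example, not repository code):

#include <cstdint>
#include <iostream>

int main()
{
    // float carries a 24-bit mantissa, so 2^24 + 1 is not representable.
    int64_t n = (1LL << 24) + 1;              // 16777217
    float f = static_cast<float>(n);          // rounds to 16777216.0f
    std::cout << (f == static_cast<float>(n - 1)) << '\n'; // prints 1: the cast lost the difference
}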
@@ -205,8 +205,7 @@ JSON::ElementType JSON::getType() const
         Pos after_string = skipString();
         if (after_string < ptr_end && *after_string == ':')
             return TYPE_NAME_VALUE_PAIR;
-        else
-            return TYPE_STRING;
+        return TYPE_STRING;
     }
     default:
         throw JSONException(std::string("JSON: unexpected char ") + *ptr_begin + ", expected one of '{[tfn-0123456789\"'");
@@ -474,8 +473,7 @@ JSON::Pos JSON::searchField(const char * data, size_t size) const
 
     if (it == end())
         return nullptr;
-    else
-        return it->data();
+    return it->data();
 }
 
@@ -487,7 +485,7 @@ bool JSON::hasEscapes() const
 
     if (*pos == '"')
         return false;
-    else if (*pos == '\\')
+    if (*pos == '\\')
         return true;
     throw JSONException("JSON: unexpected end of data.");
 }
@@ -503,7 +501,7 @@ bool JSON::hasSpecialChars() const
 
     if (*pos == '"')
         return false;
-    else if (pos < ptr_end)
+    if (pos < ptr_end)
         return true;
     throw JSONException("JSON: unexpected end of data.");
 }
@@ -682,10 +680,9 @@ double JSON::toDouble() const
 
     if (type == TYPE_NUMBER)
         return getDouble();
-    else if (type == TYPE_STRING)
+    if (type == TYPE_STRING)
        return JSON(ptr_begin + 1, ptr_end, level + 1).getDouble();
-    else
-        throw JSONException("JSON: cannot convert value to double.");
+    throw JSONException("JSON: cannot convert value to double.");
 }
 
 Int64 JSON::toInt() const
@@ -694,10 +691,9 @@ Int64 JSON::toInt() const
 
     if (type == TYPE_NUMBER)
         return getInt();
-    else if (type == TYPE_STRING)
+    if (type == TYPE_STRING)
         return JSON(ptr_begin + 1, ptr_end, level + 1).getInt();
-    else
-        throw JSONException("JSON: cannot convert value to signed integer.");
+    throw JSONException("JSON: cannot convert value to signed integer.");
 }
 
 UInt64 JSON::toUInt() const
@@ -706,10 +702,9 @@ UInt64 JSON::toUInt() const
 
     if (type == TYPE_NUMBER)
         return getUInt();
-    else if (type == TYPE_STRING)
+    if (type == TYPE_STRING)
         return JSON(ptr_begin + 1, ptr_end, level + 1).getUInt();
-    else
-        throw JSONException("JSON: cannot convert value to unsigned integer.");
+    throw JSONException("JSON: cannot convert value to unsigned integer.");
 }
 
 std::string JSON::toString() const
@@ -718,11 +713,9 @@ std::string JSON::toString() const
 
     if (type == TYPE_STRING)
         return getString();
-    else
-    {
-        Pos pos = skipElement();
-        return std::string(ptr_begin, pos - ptr_begin);
-    }
+
+    Pos pos = skipElement();
+    return std::string(ptr_begin, pos - ptr_begin);
 }
@@ -203,9 +203,7 @@ T JSON::getWithDefault(const std::string & key, const T & default_) const
 
         if (key_json.isType<T>())
             return key_json.get<T>();
-        else
-            return default_;
-    }
-    else
-        return default_;
+        return default_;
+    }
+    return default_;
 }
@@ -151,19 +151,19 @@ inline bool memequalWide(const char * p1, const char * p2, size_t size)
         return unalignedLoad<uint64_t>(p1) == unalignedLoad<uint64_t>(p2)
             && unalignedLoad<uint64_t>(p1 + size - 8) == unalignedLoad<uint64_t>(p2 + size - 8);
     }
-    else if (size >= 4)
+    if (size >= 4)
     {
         /// Chunks of 4..7 bytes.
         return unalignedLoad<uint32_t>(p1) == unalignedLoad<uint32_t>(p2)
             && unalignedLoad<uint32_t>(p1 + size - 4) == unalignedLoad<uint32_t>(p2 + size - 4);
     }
-    else if (size >= 2)
+    if (size >= 2)
    {
         /// Chunks of 2..3 bytes.
         return unalignedLoad<uint16_t>(p1) == unalignedLoad<uint16_t>(p2)
             && unalignedLoad<uint16_t>(p1 + size - 2) == unalignedLoad<uint16_t>(p2 + size - 2);
     }
-    else if (size >= 1)
+    if (size >= 1)
     {
         /// A single byte.
         return *p1 == *p2;
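
For the 4..7 byte case above, memequalWide compares two overlapping 32-bit windows, [0, 4) and [size - 4, size), which together cover every byte, so two loads and two compares decide equality without a loop. A sketch of the trick, assuming a memcpy-based unalignedLoad helper like the one in ClickHouse's base headers:

#include <cstddef>
#include <cstdint>
#include <cstring>

template <typename T>
T unalignedLoad(const void * address)
{
    T value;
    std::memcpy(&value, address, sizeof(value)); // defined behavior for any alignment
    return value;
}

// Equality for buffers of 4..7 bytes: the first and last 4-byte windows
// overlap in the middle, so together they cover the whole range.
bool equal4to7(const char * p1, const char * p2, std::size_t size)
{
    return unalignedLoad<uint32_t>(p1) == unalignedLoad<uint32_t>(p2)
        && unalignedLoad<uint32_t>(p1 + size - 4) == unalignedLoad<uint32_t>(p2 + size - 4);
}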
@@ -53,10 +53,9 @@ void argsToConfig(const Poco::Util::Application::ArgVec & argv,
             key = arg.substr(key_start);
             continue;
         }
-        else
-        {
-            key = "";
-        }
+
+        key = "";
 
         if (key_start == std::string::npos)
             continue;
@@ -330,9 +330,8 @@ inline const char * find_first_symbols_dispatch(const char * begin, const char *
 #if defined(__SSE4_2__)
     if (sizeof...(symbols) >= 5)
         return find_first_symbols_sse42<positive, return_mode, sizeof...(symbols), symbols...>(begin, end);
-    else
 #endif
-        return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
+    return find_first_symbols_sse2<positive, return_mode, symbols...>(begin, end);
 }
 
 template <bool positive, ReturnMode return_mode>
@@ -341,9 +340,8 @@ inline const char * find_first_symbols_dispatch(const std::string_view haystack,
 #if defined(__SSE4_2__)
     if (symbols.str.size() >= 5)
         return find_first_symbols_sse42<positive, return_mode>(haystack.begin(), haystack.end(), symbols);
-    else
 #endif
-        return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
+    return find_first_symbols_sse2<positive, return_mode>(haystack.begin(), haystack.end(), symbols.str.data(), symbols.str.size());
 }
 
 }
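
In find_first_symbols_dispatch, the removed `else` straddled the `#endif`: with SSE4.2 enabled it bound the SSE2 call to the `if`, and without SSE4.2 only the SSE2 call was compiled. The early return keeps both preprocessor configurations correct while satisfying the tidy rule. A hedged sketch of the same pattern, with hypothetical function names:

#include <cstddef>

static const char * scan_sse42(const char * begin, const char *) { return begin; }   // placeholder
static const char * scan_generic(const char * begin, const char *) { return begin; } // placeholder

const char * scan_dispatch(const char * begin, const char * end, std::size_t symbol_count)
{
#if defined(__SSE4_2__)
    // Early return: when this branch is compiled in and taken, we never fall through.
    if (symbol_count >= 5)
        return scan_sse42(begin, end);
#endif
    return scan_generic(begin, end);
}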
@@ -33,8 +33,7 @@ std::optional<uint64_t> getCgroupsV2MemoryLimit()
         uint64_t value;
         if (setting_file >> value)
             return {value};
-        else
-            return {}; /// e.g. the cgroups default "max"
+        return {}; /// e.g. the cgroups default "max"
     }
     current_cgroup = current_cgroup.parent_path();
 }
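
getCgroupsV2MemoryLimit relies on stream extraction failing when memory.max holds the literal "max" (no limit): the function then returns an empty optional and the caller retries one level up the cgroup hierarchy. A minimal standalone sketch of that read, with a hypothetical path parameter (not the repository's exact code):

#include <cstdint>
#include <fstream>
#include <optional>
#include <string>

std::optional<uint64_t> readCgroupMemoryMax(const std::string & path)
{
    std::ifstream setting_file(path);
    uint64_t value;
    if (setting_file >> value)
        return {value};
    return {}; // extraction fails on the cgroups default "max", i.e. no limit set
}

For example, readCgroupMemoryMax("/sys/fs/cgroup/memory.max") would yield an empty optional on an unlimited cgroup and the byte count otherwise.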
@@ -1420,8 +1420,6 @@ config
 configs
 conformant
 congruential
-conjuction
-conjuctive
 connectionId
 const
 contrib
@@ -1698,7 +1696,6 @@ formatReadableSize
 formatReadableTimeDelta
 formatRow
 formatRowNoNewline
-formated
 formatschema
 formatter
 formatters
@@ -3048,3 +3045,89 @@ znode
 znodes
 zookeeperSessionUptime
 zstd
+ArrowCompression
+CapnProtoEnumComparingMode
+DateTimeInputFormat
+DateTimeOutputFormat
+DateTimeOverflowBehavior
+deserialize
+dotall
+EachRow
+EscapingRule
+IdentifierQuotingRule
+IdentifierQuotingStyle
+IntervalOutputFormat
+MsgPackUUIDRepresentation
+ORCCompression
+ParquetCompression
+ParquetVersion
+SchemaInferenceMode
+alloc
+CacheWarmer
+conjuctive
+cors
+CORS
+countIf
+DefaultTableEngine
+dereference
+DistributedDDLOutputMode
+DistributedProductMode
+formatdatetime
+inequal
+INVOKER
+ITION
+JoinAlgorithm
+JoinStrictness
+keepalive
+ListObject
+ListObjects
+LoadBalancing
+LocalFSReadMethod
+LogQueriesType
+LogsLevel
+MaxThreads
+MemorySample
+multibuffer
+multiif
+multiread
+multithreading
+MySQLDataTypesSupport
+nonconst
+NonZeroUInt
+nullptr
+OverflowMode
+OverflowModeGroupBy
+ParallelReplicasMode
+param
+parsedatetime
+perf
+PerfEventInfo
+perkey
+prefetched
+prefetches
+prefetching
+preimage
+QueryCacheNondeterministicFunctionHandling
+QueryCacheSystemTableHandling
+remerge
+replcase
+rerange
+RetryStrategy
+rowlist
+SetOperationMode
+ShortCircuitFunctionEvaluation
+SQLSecurityType
+sumIf
+TCPHandler
+throwif
+TotalsMode
+TransactionsWaitCSNMode
+undelete
+unmerged
+DataPacket
+DDLs
+DistributedCacheLogMode
+DistributedCachePoolBehaviourOnLimit
+SharedJoin
+ShareSet
+unacked
@@ -11,6 +11,38 @@ option (ARCH_NATIVE "Add -march=native compiler flag. This makes your binaries n
 if (ARCH_NATIVE)
     set (COMPILER_FLAGS "${COMPILER_FLAGS} -march=native")
 
+    # Populate the ENABLE_ option flags. This is required for the build of some third-party dependencies, specifically snappy, which
+    # (somewhat weirdly) expects the relative SNAPPY_HAVE_ preprocessor variables to be populated, in addition to the microarchitecture
+    # feature flags being enabled in the compiler. This fixes the ARCH_NATIVE flag by automatically populating the ENABLE_ option flags
+    # according to the current CPU's capabilities, detected using clang.
+    if (ARCH_AMD64)
+        execute_process(
+            COMMAND sh -c "clang -E - -march=native -###"
+            INPUT_FILE /dev/null
+            OUTPUT_QUIET
+            ERROR_VARIABLE TEST_FEATURE_RESULT)
+
+        macro(TEST_AMD64_FEATURE TEST_FEATURE_RESULT feat flag)
+            if (${TEST_FEATURE_RESULT} MATCHES "\"\\+${feat}\"")
+                set(${flag} ON)
+            else ()
+                set(${flag} OFF)
+            endif ()
+        endmacro()
+
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} ssse3 ENABLE_SSSE3)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} sse4.1 ENABLE_SSE41)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} sse4.2 ENABLE_SSE42)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} vpclmulqdq ENABLE_PCLMULQDQ)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} popcnt ENABLE_POPCNT)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx ENABLE_AVX)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx2 ENABLE_AVX2)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx512f ENABLE_AVX512)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} avx512vbmi ENABLE_AVX512_VBMI)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} bmi ENABLE_BMI)
+        TEST_AMD64_FEATURE (${TEST_FEATURE_RESULT} bmi2 ENABLE_BMI2)
+    endif ()
+
 elseif (ARCH_AARCH64)
     # ARM publishes almost every year a new revision of it's ISA [1]. Each version comes with new mandatory and optional features from
     # which CPU vendors can pick and choose. This creates a lot of variability ... We provide two build "profiles", one for maximum
@@ -1,4 +1,21 @@
-set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_DEBUG=0") # More checks in debug build.
+if (CMAKE_BUILD_TYPE_UC STREQUAL "DEBUG")
+    # Enable libcxx debug mode: https://releases.llvm.org/15.0.0/projects/libcxx/docs/DesignDocs/DebugMode.html
+    # The docs say the debug mode violates complexity guarantees, so do this only for Debug builds.
+    # set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_ENABLE_DEBUG_MODE=1")
+    # ^^ Crashes the database upon startup, needs investigation.
+    # Besides that, the implementation looks like a poor man's MSAN specific to libcxx. Since CI tests MSAN
+    # anyways, we can keep the debug mode disabled.
+
+    # Libcxx also provides extra assertions:
+    # --> https://releases.llvm.org/15.0.0/projects/libcxx/docs/UsingLibcxx.html#assertions-mode
+    # These look orthogonal to the debug mode but the debug mode enables them implicitly:
+    # --> https://github.com/llvm/llvm-project/blob/release/15.x/libcxx/include/__assert#L29
+    # They are cheap and straightforward, so enable them in debug builds:
+    set (CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -D_LIBCPP_ENABLE_ASSERTIONS=1")
+
+    # TODO Once we upgrade to LLVM 18+, reconsider all of the above as they introduced "hardening modes":
+    # https://libcxx.llvm.org/Hardening.html
+endif ()
 
 add_subdirectory(contrib/libcxxabi-cmake)
 add_subdirectory(contrib/libcxx-cmake)
@@ -48,6 +48,8 @@ if (NOT LINKER_NAME)
         find_program (LLD_PATH NAMES "ld.lld-${COMPILER_VERSION_MAJOR}" "ld.lld")
     elseif (OS_DARWIN)
         find_program (LLD_PATH NAMES "ld")
+        # Duplicate libraries passed to the linker is not a problem.
+        set (CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -Wl,-no_warn_duplicate_libraries")
     endif ()
 if (LLD_PATH)
     if (OS_LINUX OR OS_DARWIN)
@@ -1 +1,4 @@
-# See contrib/usearch-cmake/CMakeLists.txt
+set (FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16/")
+
+add_library(_fp16 INTERFACE)
+target_include_directories(_fp16 SYSTEM INTERFACE ${FP16_PROJECT_DIR}/include)
contrib/SimSIMD | 2
@@ -1 +1 @@
-Subproject commit 91a76d1ac519b3b9dc8957734a3dabd985f00c26
+Subproject commit ff51434d90c66f916e94ff05b24530b127aa4cff
@@ -1 +1,4 @@
-# See contrib/usearch-cmake/CMakeLists.txt
+set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
+
+add_library(_simsimd INTERFACE)
+target_include_directories(_simsimd SYSTEM INTERFACE "${SIMSIMD_PROJECT_DIR}/include")
@@ -1,6 +1,9 @@
 set(ABSL_ROOT_DIR "${ClickHouse_SOURCE_DIR}/contrib/abseil-cpp")
 set(ABSL_COMMON_INCLUDE_DIRS "${ABSL_ROOT_DIR}")
 
+# To avoid errors "'X' does not refer to a value" while using `offsetof` function.
+set(CMAKE_CXX_STANDARD 17)
+
 # This is a minimized version of the function definition in CMake/AbseilHelpers.cmake
 
 #
@@ -5,6 +5,9 @@ if(NOT ENABLE_PROTOBUF)
     return()
 endif()
 
+# To avoid errors "'X' does not refer to a value" while using `offsetof` function.
+set(CMAKE_CXX_STANDARD 17)
+
 set(Protobuf_INCLUDE_DIR "${ClickHouse_SOURCE_DIR}/contrib/google-protobuf/src")
 if(OS_FREEBSD AND SANITIZE STREQUAL "address")
     # ../contrib/protobuf/src/google/protobuf/arena_impl.h:45:10: fatal error: 'sanitizer/asan_interface.h' file not found
contrib/grpc | 2
@@ -1 +1 @@
-Subproject commit 7bc3abe952aba1dc7bce7f2f790dc781cb51a41e
+Subproject commit 62e871c36fa93c0af939bd31762845265214fe3d
@@ -6,6 +6,8 @@ if(NOT ENABLE_GRPC)
     return()
 endif()
 
+set(CMAKE_CXX_STANDARD 17)
+
 set(_gRPC_SOURCE_DIR "${ClickHouse_SOURCE_DIR}/contrib/grpc")
 set(_gRPC_BINARY_DIR "${ClickHouse_BINARY_DIR}/contrib/grpc")
@@ -22,7 +22,7 @@
 # limitations under the License.
 
 # We want to use C++23, but GRPC is not ready
-set (CMAKE_CXX_STANDARD 20)
+set (CMAKE_CXX_STANDARD 17)
 
 set(_gRPC_ZLIB_INCLUDE_DIR "")
 set(_gRPC_ZLIB_LIBRARIES ch_contrib::zlib)
contrib/libdivide | 2
@@ -1 +1 @@
-Subproject commit 3bd34388573681ce563348cdf04fe15d24770d04
+Subproject commit 01526031eb79375dc85e0212c966d2c514a01234
contrib/simdjson | 2
@@ -1 +1 @@
-Subproject commit 6060be2fdf62edf4a8f51a8b0883d57d09397b30
+Subproject commit e341c8b43861b43de29c48ab65f292d997096953
contrib/usearch | 2
@@ -1 +1 @@
-Subproject commit 7a8967cb442b08ca20c3dd781414378e65957d37
+Subproject commit d1d33eac94acd3b628e0b446c927ec3295ef63c7
@@ -1,14 +1,9 @@
-set(FP16_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/FP16")
-set(SIMSIMD_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/SimSIMD")
 set(USEARCH_PROJECT_DIR "${ClickHouse_SOURCE_DIR}/contrib/usearch")
 
 add_library(_usearch INTERFACE)
+target_include_directories(_usearch SYSTEM INTERFACE ${USEARCH_PROJECT_DIR}/include)
 
-target_include_directories(_usearch SYSTEM INTERFACE
-    ${FP16_PROJECT_DIR}/include
-    ${SIMSIMD_PROJECT_DIR}/include
-    ${USEARCH_PROJECT_DIR}/include)
+target_link_libraries(_usearch INTERFACE _fp16)
+target_compile_definitions(_usearch INTERFACE USEARCH_USE_FP16LIB)
+
+# target_compile_definitions(_usearch INTERFACE USEARCH_USE_SIMSIMD)
@@ -9,7 +9,7 @@ RUN CGO_ENABLED=0 go install github.com/wjdp/htmltest@v${HTMLTEST_VERSION} \
 # nodejs 17 prefers ipv6 and is broken in our environment
 FROM node:16-alpine
 
-RUN apk add --no-cache git openssh bash
+RUN apk add --no-cache git openssh bash curl
 
 # At this point we want to really update /opt/clickhouse-docs directory
 # So we reset the cache
@@ -33,4 +33,7 @@ RUN mkdir /output_path \
 COPY run.sh /run.sh
 COPY --from=htmltest-builder /usr/bin/htmltest /usr/bin/htmltest
 
+# Install ClickHouse Local, which is used to auto-generate some doc pages.
+RUN curl https://clickhouse.com/ | sh
+
 ENTRYPOINT ["/run.sh"]
@@ -21,6 +21,78 @@ do
     fi
 done
 
+# Generate pages with settings
+
+./clickhouse -q "
+WITH
+
+'/ClickHouse/src/Core/Settings.cpp' AS cpp_file,
+
+settings_from_cpp AS
+(
+    SELECT extract(line, 'M\\(\\w+, (\\w+),') AS name
+    FROM file(cpp_file, LineAsString)
+    WHERE match(line, '^\\s*M\\(')
+),
+
+main_content AS
+(
+    SELECT format('## {} {}\\n\\nType: {}\\n\\nDefault value: {}\\n\\n{}\\n\\n', name, '{#'||name||'}', type, default, trim(BOTH '\\n' FROM description))
+    FROM system.settings WHERE name IN settings_from_cpp
+    ORDER BY name
+),
+
+'---
+sidebar_label: Core Settings
+sidebar_position: 2
+slug: /en/operations/settings/settings
+toc_max_heading_level: 2
+---
+
+# Core Settings
+
+All below settings are also available in table [system.settings](/docs/en/operations/system-tables/settings).
+
+' AS prefix
+
+SELECT prefix || (SELECT groupConcat(*) FROM main_content)
+INTO OUTFILE '/opt/clickhouse-docs/docs/en/operations/settings/settings.md' TRUNCATE FORMAT LineAsString
+"
+
+./clickhouse -q "
+WITH
+
+'/ClickHouse/src/Core/FormatFactorySettingsDeclaration.h' AS cpp_file,
+
+settings_from_cpp AS
+(
+    SELECT extract(line, 'M\\(\\w+, (\\w+),') AS name
+    FROM file(cpp_file, LineAsString)
+    WHERE match(line, '^\\s*M\\(')
+),
+
+main_content AS
+(
+    SELECT format('## {} {}\\n\\nType: {}\\n\\nDefault value: {}\\n\\n{}\\n\\n', name, '{#'||name||'}', type, default, trim(BOTH '\\n' FROM description))
+    FROM system.settings WHERE name IN settings_from_cpp
+    ORDER BY name
+),
+
+'---
+sidebar_label: Format Settings
+sidebar_position: 52
+slug: /en/operations/settings/formats
+toc_max_heading_level: 2
+---
+
+# Format settings {#format-settings}
+
+' AS prefix
+
+SELECT prefix || (SELECT groupConcat(*) FROM main_content)
+INTO OUTFILE '/opt/clickhouse-docs/docs/en/operations/settings/settings-formats.md' TRUNCATE FORMAT LineAsString
+"
+
 # Force build error on wrong symlinks
 sed -i '/onBrokenMarkdownLinks:/ s/ignore/error/g' docusaurus.config.js
@@ -34,7 +34,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.9.1.3278"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-keeper"
 ARG DIRECT_DOWNLOAD_URLS=""
@@ -35,7 +35,7 @@ RUN arch=${TARGETARCH:-amd64} \
 # lts / testing / prestable / etc
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="https://packages.clickhouse.com/tgz/${REPO_CHANNEL}"
-ARG VERSION="24.9.1.3278"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 ARG DIRECT_DOWNLOAD_URLS=""
@@ -28,7 +28,7 @@ RUN sed -i "s|http://archive.ubuntu.com|${apt_archive}|g" /etc/apt/sources.list
 
 ARG REPO_CHANNEL="stable"
 ARG REPOSITORY="deb [signed-by=/usr/share/keyrings/clickhouse-keyring.gpg] https://packages.clickhouse.com/deb ${REPO_CHANNEL} main"
-ARG VERSION="24.9.1.3278"
+ARG VERSION="24.9.2.42"
 ARG PACKAGES="clickhouse-client clickhouse-server clickhouse-common-static"
 
 #docker-official-library:off
docker/test/integration/runner/misc/openldap/initialized.sh | 5 (new executable file)
@@ -0,0 +1,5 @@
+#!/bin/bash
+set -e
+
+# workaround for https://github.com/bitnami/containers/issues/73310
+touch /tmp/.openldap-initialized
@@ -0,0 +1 @@
+[rabbitmq_consistent_hash_exchange].
@@ -13,3 +13,5 @@ ssl_options.fail_if_no_peer_cert = false
 ssl_options.cacertfile = /etc/rabbitmq/ca-cert.pem
 ssl_options.certfile = /etc/rabbitmq/server-cert.pem
 ssl_options.keyfile = /etc/rabbitmq/server-key.pem
+
+vm_memory_high_watermark.absolute = 2GB
@@ -196,7 +196,6 @@ When writing docs, you can use prepared templates. Copy the code of a template a
 Templates:
 
 - [Function](_description_templates/template-function.md)
-- [Setting](_description_templates/template-setting.md)
 - [Server Setting](_description_templates/template-server-setting.md)
 - [Database or Table engine](_description_templates/template-engine.md)
 - [System table](_description_templates/template-system-table.md)
@@ -1,27 +0,0 @@
-## setting_name {#setting_name}
-
-Description.
-
-For the switch setting, use the typical phrase: “Enables or disables something ...”.
-
-Possible values:
-
-*For switcher setting:*
-
-- 0 — Disabled.
-- 1 — Enabled.
-
-*For another setting (typical phrases):*
-
-- Positive integer.
-- 0 — Disabled or unlimited or something else.
-
-Default value: `value`.
-
-**Additional Info** (Optional)
-
-The name of an additional section can be any, for example, **Usage**.
-
-**See Also** (Optional)
-
-- [link](#)
@@ -1,11 +0,0 @@
-sudo apt-get install -y apt-transport-https ca-certificates dirmngr
-sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 8919F6BD2B48D754
-
-echo "deb https://packages.clickhouse.com/deb stable main" | sudo tee \
-    /etc/apt/sources.list.d/clickhouse.list
-sudo apt-get update
-
-sudo apt-get install -y clickhouse-server clickhouse-client
-
-sudo service clickhouse-server start
-clickhouse-client # or "clickhouse-client --password" if you've set up a password.
@@ -1,6 +0,0 @@
-sudo yum install -y yum-utils
-sudo yum-config-manager --add-repo https://packages.clickhouse.com/rpm/clickhouse.repo
-sudo yum install -y clickhouse-server clickhouse-client
-
-sudo /etc/init.d/clickhouse-server start
-clickhouse-client # or "clickhouse-client --password" if you set up a password.
@@ -1,32 +0,0 @@
-LATEST_VERSION=$(curl -s https://packages.clickhouse.com/tgz/stable/ | \
-    grep -Eo '[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+' | sort -V -r | head -n 1)
-export LATEST_VERSION
-
-case $(uname -m) in
-    x86_64) ARCH=amd64 ;;
-    aarch64) ARCH=arm64 ;;
-    *) echo "Unknown architecture $(uname -m)"; exit 1 ;;
-esac
-
-for PKG in clickhouse-common-static clickhouse-common-static-dbg clickhouse-server clickhouse-client
-do
-    curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION-${ARCH}.tgz" \
-        || curl -fO "https://packages.clickhouse.com/tgz/stable/$PKG-$LATEST_VERSION.tgz"
-done
-
-tar -xzvf "clickhouse-common-static-$LATEST_VERSION-${ARCH}.tgz" \
-    || tar -xzvf "clickhouse-common-static-$LATEST_VERSION.tgz"
-sudo "clickhouse-common-static-$LATEST_VERSION/install/doinst.sh"
-
-tar -xzvf "clickhouse-common-static-dbg-$LATEST_VERSION-${ARCH}.tgz" \
-    || tar -xzvf "clickhouse-common-static-dbg-$LATEST_VERSION.tgz"
-sudo "clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh"
-
-tar -xzvf "clickhouse-server-$LATEST_VERSION-${ARCH}.tgz" \
-    || tar -xzvf "clickhouse-server-$LATEST_VERSION.tgz"
-sudo "clickhouse-server-$LATEST_VERSION/install/doinst.sh" configure
-sudo /etc/init.d/clickhouse-server start
-
-tar -xzvf "clickhouse-client-$LATEST_VERSION-${ARCH}.tgz" \
-    || tar -xzvf "clickhouse-client-$LATEST_VERSION.tgz"
-sudo "clickhouse-client-$LATEST_VERSION/install/doinst.sh"
@@ -14,7 +14,12 @@ then
     HAS_SSE42=$(grep sse4_2 /proc/cpuinfo)
     if [ "${HAS_SSE42}" ]
     then
-        DIR="amd64"
+        if ldd --version 2>&1 | grep -q musl
+        then
+            DIR="amd64musl"
+        else
+            DIR="amd64"
+        fi
     else
         DIR="amd64compat"
     fi
@@ -41,7 +41,7 @@ sidebar_label: 2022
 * Backported in [#25364](https://github.com/ClickHouse/ClickHouse/issues/25364): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * Backported in [#25387](https://github.com/ClickHouse/ClickHouse/issues/25387): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Backported in [#25455](https://github.com/ClickHouse/ClickHouse/issues/25455): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Backported in [#25406](https://github.com/ClickHouse/ClickHouse/issues/25406): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
+* Backported in [#25406](https://github.com/ClickHouse/ClickHouse/issues/25406): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
 * Backported in [#25505](https://github.com/ClickHouse/ClickHouse/issues/25505): Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
 
 #### NO CL ENTRY
@@ -40,7 +40,7 @@ sidebar_label: 2022
 * Backported in [#25362](https://github.com/ClickHouse/ClickHouse/issues/25362): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * Backported in [#25386](https://github.com/ClickHouse/ClickHouse/issues/25386): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Backported in [#25456](https://github.com/ClickHouse/ClickHouse/issues/25456): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Backported in [#25408](https://github.com/ClickHouse/ClickHouse/issues/25408): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
+* Backported in [#25408](https://github.com/ClickHouse/ClickHouse/issues/25408): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
 * Backported in [#25504](https://github.com/ClickHouse/ClickHouse/issues/25504): Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
 
 #### NO CL ENTRY
@@ -24,7 +24,7 @@ sidebar_label: 2022
 * Backported in [#25363](https://github.com/ClickHouse/ClickHouse/issues/25363): On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * Backported in [#25388](https://github.com/ClickHouse/ClickHouse/issues/25388): Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Backported in [#25448](https://github.com/ClickHouse/ClickHouse/issues/25448): Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Backported in [#25407](https://github.com/ClickHouse/ClickHouse/issues/25407): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
+* Backported in [#25407](https://github.com/ClickHouse/ClickHouse/issues/25407): Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
 
 #### NOT FOR CHANGELOG / INSIGNIFICANT
 
@@ -133,7 +133,7 @@ sidebar_label: 2022
 * On ZooKeeper connection loss `ReplicatedMergeTree` table might wait for background operations to complete before trying to reconnect. It's fixed, now background operations are stopped forcefully. [#25306](https://github.com/ClickHouse/ClickHouse/pull/25306) ([Alexander Tokmakov](https://github.com/tavplubix)).
 * Fix the possibility of non-deterministic behaviour of the `quantileDeterministic` function and similar. This closes [#20480](https://github.com/ClickHouse/ClickHouse/issues/20480). [#25313](https://github.com/ClickHouse/ClickHouse/pull/25313) ([Alexey Milovidov](https://github.com/alexey-milovidov)).
 * Fix lost `WHERE` condition in expression-push-down optimization of query plan (setting `query_plan_filter_push_down = 1` by default). Fixes [#25368](https://github.com/ClickHouse/ClickHouse/issues/25368). [#25370](https://github.com/ClickHouse/ClickHouse/pull/25370) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
-* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formated query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
+* Fix `REPLACE` column transformer when used in DDL by correctly quoting the formatted query. This fixes [#23925](https://github.com/ClickHouse/ClickHouse/issues/23925). [#25391](https://github.com/ClickHouse/ClickHouse/pull/25391) ([Amos Bird](https://github.com/amosbird)).
 * Fix segfault when sharding_key is absent in task config for copier. [#25419](https://github.com/ClickHouse/ClickHouse/pull/25419) ([Nikita Mikhaylov](https://github.com/nikitamikhaylov)).
 * Fix excessive underscore before the names of the preprocessed configuration files. [#25431](https://github.com/ClickHouse/ClickHouse/pull/25431) ([Vitaly Baranov](https://github.com/vitlibar)).
 * Fix convertion of datetime with timezone for MySQL, PostgreSQL, ODBC. Closes [#5057](https://github.com/ClickHouse/ClickHouse/issues/5057). [#25528](https://github.com/ClickHouse/ClickHouse/pull/25528) ([Kseniia Sumarokova](https://github.com/kssenii)).
docs/changelogs/v24.3.12.75-lts.md | 49 (new file)
@@ -0,0 +1,49 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.3.12.75-lts (7cb5dff8019) FIXME as compared to v24.3.11.7-lts (28795d0a47e)
+
+#### Improvement
+* Backported in [#69607](https://github.com/ClickHouse/ClickHouse/issues/69607): Improved memory accounting for cgroups v2 to exclude the amount occupied by the page cache. [#65470](https://github.com/ClickHouse/ClickHouse/pull/65470) ([Nikita Taranov](https://github.com/nickitat)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#69785](https://github.com/ClickHouse/ClickHouse/issues/69785): Fix attaching table when pg dbname contains "-" in MaterializedPostgreSQL. [#62730](https://github.com/ClickHouse/ClickHouse/pull/62730) ([takakawa](https://github.com/takakawa)).
+* Backported in [#69461](https://github.com/ClickHouse/ClickHouse/issues/69461): Fix expiration in `RoleCache`. [#67748](https://github.com/ClickHouse/ClickHouse/pull/67748) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#68217](https://github.com/ClickHouse/ClickHouse/issues/68217): Fixed a NULL pointer dereference, triggered by a specially crafted query, that crashed the server via hopEnd, hopStart, tumbleEnd, and tumbleStart. [#68098](https://github.com/ClickHouse/ClickHouse/pull/68098) ([Salvatore Mesoraca](https://github.com/aiven-sal)).
+* Backported in [#69437](https://github.com/ClickHouse/ClickHouse/issues/69437): After unexpected restart, fail to start replication of ReplicatedMergeTree due to abnormal handling of covered-by-broken part. [#68584](https://github.com/ClickHouse/ClickHouse/pull/68584) ([baolin](https://github.com/baolinhuang)).
+* Backported in [#69827](https://github.com/ClickHouse/ClickHouse/issues/69827): Make `ColumnsDescription::toString` format each column using the same `IAST::FormatState object`. This results in uniform columns metadata being written to disk and ZooKeeper. [#68733](https://github.com/ClickHouse/ClickHouse/pull/68733) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+* Backported in [#69294](https://github.com/ClickHouse/ClickHouse/issues/69294): Fix merging of aggregated data for grouping sets. [#68744](https://github.com/ClickHouse/ClickHouse/pull/68744) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#70470](https://github.com/ClickHouse/ClickHouse/issues/70470): Fix inf loop after `restore replica` in the replicated merge tree with zero copy. [#69293](https://github.com/ClickHouse/ClickHouse/pull/69293) ([MikhailBurdukov](https://github.com/MikhailBurdukov)).
+* Backported in [#69456](https://github.com/ClickHouse/ClickHouse/issues/69456): Fix undefined behavior when all connection attempts fail getting a connection for insertions. [#69390](https://github.com/ClickHouse/ClickHouse/pull/69390) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#69497](https://github.com/ClickHouse/ClickHouse/issues/69497): Fixed a `LOGICAL_ERROR` with function `sqidDecode` ([#69450](https://github.com/ClickHouse/ClickHouse/issues/69450)). [#69451](https://github.com/ClickHouse/ClickHouse/pull/69451) ([Robert Schulze](https://github.com/rschu1ze)).
+* Backported in [#69724](https://github.com/ClickHouse/ClickHouse/issues/69724): Keep original order of conditions during move to prewhere. Previously the order could change and it could lead to failing queries when the order is important. [#69560](https://github.com/ClickHouse/ClickHouse/pull/69560) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#69668](https://github.com/ClickHouse/ClickHouse/issues/69668): Fix Keeper multi-request preprocessing after ZNOAUTH error. [#69627](https://github.com/ClickHouse/ClickHouse/pull/69627) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#69792](https://github.com/ClickHouse/ClickHouse/issues/69792): Make getHyperrectangleForRowGroup not throw an exception when the data type in parquet file is not convertable into the requested data type. Solved the user's problem when the Parquet file had Decimal64 data type and the column data type was DateTime. [#69745](https://github.com/ClickHouse/ClickHouse/pull/69745) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+* Backported in [#70089](https://github.com/ClickHouse/ClickHouse/issues/70089): Now SQL security will work with parameterized views correctly. [#69984](https://github.com/ClickHouse/ClickHouse/pull/69984) ([pufit](https://github.com/pufit)).
+* Backported in [#70077](https://github.com/ClickHouse/ClickHouse/issues/70077): Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
+* Backported in [#70162](https://github.com/ClickHouse/ClickHouse/issues/70162): Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#70232](https://github.com/ClickHouse/ClickHouse/issues/70232): Check for Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70179](https://github.com/ClickHouse/ClickHouse/issues/70179): Fix data race in ColumnObject/ColumnTuple decompress method that could lead to heap use after free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70241](https://github.com/ClickHouse/ClickHouse/issues/70241): Fix the password being displayed in `system.query_log` for users with bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)).
+* Backported in [#70397](https://github.com/ClickHouse/ClickHouse/issues/70397): Fix crash when using WITH FILL incorrectly. [#70338](https://github.com/ClickHouse/ClickHouse/pull/70338) ([Raúl Marín](https://github.com/Algunenano)).
+
+#### NO CL CATEGORY
+
+* Backported in [#69526](https://github.com/ClickHouse/ClickHouse/issues/69526):. [#67029](https://github.com/ClickHouse/ClickHouse/pull/67029) ([Alexander Tokmakov](https://github.com/tavplubix)).
+
+#### NOT FOR CHANGELOG / INSIGNIFICANT
+
+* Backported in [#69506](https://github.com/ClickHouse/ClickHouse/issues/69506): Better handling of errors from azure storage. [#62306](https://github.com/ClickHouse/ClickHouse/pull/62306) ([Anton Popov](https://github.com/CurtizJ)).
+* Backported in [#69955](https://github.com/ClickHouse/ClickHouse/issues/69955): Output an operation error for ZK Multi request failed operation into log. [#68127](https://github.com/ClickHouse/ClickHouse/pull/68127) ([Aleksei Filatov](https://github.com/aalexfvk)).
+* Backported in [#69485](https://github.com/ClickHouse/ClickHouse/issues/69485): Fix test_role & test_keeper_s3_snapshot integration tests. [#69013](https://github.com/ClickHouse/ClickHouse/pull/69013) ([Shankar](https://github.com/shiyer7474)).
+* Backported in [#70028](https://github.com/ClickHouse/ClickHouse/issues/70028): Remove stale moving parts without zookeeper. [#69075](https://github.com/ClickHouse/ClickHouse/pull/69075) ([Kirill](https://github.com/kirillgarbar)).
+* Backported in [#69421](https://github.com/ClickHouse/ClickHouse/issues/69421): Fix: Not-ready Set with parallel replicas. [#69264](https://github.com/ClickHouse/ClickHouse/pull/69264) ([Igor Nikonov](https://github.com/devcrafter)).
+* Backported in [#69747](https://github.com/ClickHouse/ClickHouse/issues/69747): Add function `kill_ci_runner`. Kill runner when pre-pull failed. [#69557](https://github.com/ClickHouse/ClickHouse/pull/69557) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* Backported in [#69636](https://github.com/ClickHouse/ClickHouse/issues/69636): Add more contexts to the debug action and use it broadly. [#69599](https://github.com/ClickHouse/ClickHouse/pull/69599) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
+* The test is flaky and the feature experimental. [#70269](https://github.com/ClickHouse/ClickHouse/pull/70269) ([Raúl Marín](https://github.com/Algunenano)).
+* Fix test distributed inter server secret in 24.3. [#70325](https://github.com/ClickHouse/ClickHouse/pull/70325) ([Raúl Marín](https://github.com/Algunenano)).
docs/changelogs/v24.8.5.115-lts.md | 73 (new file)
@@ -0,0 +1,73 @@
+---
+sidebar_position: 1
+sidebar_label: 2024
+---
+
+# 2024 Changelog
+
+### ClickHouse release v24.8.5.115-lts (8c4cb00a384) FIXME as compared to v24.8.4.13-lts (53195bc189b)
+
+#### Improvement
+* Backported in [#70046](https://github.com/ClickHouse/ClickHouse/issues/70046): Add new column readonly_duration to the system.replicas table. Needed to be able to distinguish actual readonly replicas from sentinel ones in alerts. [#69871](https://github.com/ClickHouse/ClickHouse/pull/69871) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+
+#### Bug Fix (user-visible misbehavior in an official stable release)
+* Backported in [#69786](https://github.com/ClickHouse/ClickHouse/issues/69786): Fix attaching table when pg dbname contains "-" in MaterializedPostgreSQL. [#62730](https://github.com/ClickHouse/ClickHouse/pull/62730) ([takakawa](https://github.com/takakawa)).
+* Backported in [#70318](https://github.com/ClickHouse/ClickHouse/issues/70318): Fixed error on generated columns in MaterializedPostgreSQL when adnum ordering is broken [#63161](https://github.com/ClickHouse/ClickHouse/issues/63161). Fixed error on id column with nextval expression as default MaterializedPostgreSQL when there are generated columns in table. Fixed error on dropping publication with symbols except [a-z1-9-]. [#67664](https://github.com/ClickHouse/ClickHouse/pull/67664) ([Kruglov Kirill](https://github.com/1on)).
+* Backported in [#69467](https://github.com/ClickHouse/ClickHouse/issues/69467): Fix expiration in `RoleCache`. [#67748](https://github.com/ClickHouse/ClickHouse/pull/67748) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#69735](https://github.com/ClickHouse/ClickHouse/issues/69735): Fix crash in `lag`/`lead` which is introduced in [#67091](https://github.com/ClickHouse/ClickHouse/issues/67091). [#68262](https://github.com/ClickHouse/ClickHouse/pull/68262) ([lgbo](https://github.com/lgbo-ustc)).
+* Backported in [#69444](https://github.com/ClickHouse/ClickHouse/issues/69444): After unexpected restart, fail to start replication of ReplicatedMergeTree due to abnormal handling of covered-by-broken part. [#68584](https://github.com/ClickHouse/ClickHouse/pull/68584) ([baolin](https://github.com/baolinhuang)).
+* Backported in [#69810](https://github.com/ClickHouse/ClickHouse/issues/69810): Make `ColumnsDescription::toString` format each column using the same `IAST::FormatState object`. This results in uniform columns metadata being written to disk and ZooKeeper. [#68733](https://github.com/ClickHouse/ClickHouse/pull/68733) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+* Backported in [#69757](https://github.com/ClickHouse/ClickHouse/issues/69757): Fix incorrect results of Fix uniq and GROUP BY for JSON/Dynamic types. [#69203](https://github.com/ClickHouse/ClickHouse/pull/69203) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70195](https://github.com/ClickHouse/ClickHouse/issues/70195): Fix insertion of incomplete type into Dynamic during deserialization. It could lead to `Parameter out of bound` errors. [#69291](https://github.com/ClickHouse/ClickHouse/pull/69291) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#69398](https://github.com/ClickHouse/ClickHouse/issues/69398): Mark Dynamic type as not safe primary key type to avoid issues with Fields. [#69311](https://github.com/ClickHouse/ClickHouse/pull/69311) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#69704](https://github.com/ClickHouse/ClickHouse/issues/69704): Improve restoring of access entities' dependencies. [#69346](https://github.com/ClickHouse/ClickHouse/pull/69346) ([Vitaly Baranov](https://github.com/vitlibar)).
+* Backported in [#69459](https://github.com/ClickHouse/ClickHouse/issues/69459): Fix undefined behavior when all connection attempts fail getting a connection for insertions. [#69390](https://github.com/ClickHouse/ClickHouse/pull/69390) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#69503](https://github.com/ClickHouse/ClickHouse/issues/69503): Fixed a `LOGICAL_ERROR` with function `sqidDecode` ([#69450](https://github.com/ClickHouse/ClickHouse/issues/69450)). [#69451](https://github.com/ClickHouse/ClickHouse/pull/69451) ([Robert Schulze](https://github.com/rschu1ze)).
+* Backported in [#69480](https://github.com/ClickHouse/ClickHouse/issues/69480): Quick fix for s3queue problem on 24.6 or create query with database replicated. [#69454](https://github.com/ClickHouse/ClickHouse/pull/69454) ([Kseniia Sumarokova](https://github.com/kssenii)).
+* Backported in [#69535](https://github.com/ClickHouse/ClickHouse/issues/69535): Fixed case when memory consumption was too high because of the squashing in `INSERT INTO ... SELECT` or `CREATE TABLE AS SELECT` queries. [#69469](https://github.com/ClickHouse/ClickHouse/pull/69469) ([Yarik Briukhovetskyi](https://github.com/yariks5s)).
+* Backported in [#69696](https://github.com/ClickHouse/ClickHouse/issues/69696): Keep original order of conditions during move to prewhere. Previously the order could change and it could lead to failing queries when the order is important. [#69560](https://github.com/ClickHouse/ClickHouse/pull/69560) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70439](https://github.com/ClickHouse/ClickHouse/issues/70439): Fix vrash during insertion into FixedString column in PostgreSQL engine. [#69584](https://github.com/ClickHouse/ClickHouse/pull/69584) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#69666](https://github.com/ClickHouse/ClickHouse/issues/69666): Fix Keeper multi-request preprocessing after ZNOAUTH error. [#69627](https://github.com/ClickHouse/ClickHouse/pull/69627) ([Antonio Andelic](https://github.com/antonio2368)).
+* Backported in [#70191](https://github.com/ClickHouse/ClickHouse/issues/70191): Fix crash when executing `create view t as (with recursive 42 as ttt select ttt);`. [#69676](https://github.com/ClickHouse/ClickHouse/pull/69676) ([Han Fei](https://github.com/hanfei1991)).
+* Backported in [#69798](https://github.com/ClickHouse/ClickHouse/issues/69798): Make getHyperrectangleForRowGroup not throw an exception when the data type in parquet file is not convertable into the requested data type. Solved the user's problem when the Parquet file had Decimal64 data type and the column data type was DateTime. [#69745](https://github.com/ClickHouse/ClickHouse/pull/69745) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).
+* Backported in [#70410](https://github.com/ClickHouse/ClickHouse/issues/70410): Fixed `maxMapState` throwing 'Bad get' if value type is DateTime64. [#69787](https://github.com/ClickHouse/ClickHouse/pull/69787) ([Michael Kolupaev](https://github.com/al13n321)).
+* Backported in [#70019](https://github.com/ClickHouse/ClickHouse/issues/70019): Fix analyzer default with old compatibility value. [#69895](https://github.com/ClickHouse/ClickHouse/pull/69895) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#69941](https://github.com/ClickHouse/ClickHouse/issues/69941): Don't check dependencies during CREATE OR REPLACE VIEW during DROP of old table. Previously CREATE OR REPLACE query failed when there are dependent tables of the recreated view. [#69907](https://github.com/ClickHouse/ClickHouse/pull/69907) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70001](https://github.com/ClickHouse/ClickHouse/issues/70001): Now SQL security will work with parameterized views correctly. [#69984](https://github.com/ClickHouse/ClickHouse/pull/69984) ([pufit](https://github.com/pufit)).
+* Backported in [#70081](https://github.com/ClickHouse/ClickHouse/issues/70081): Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
+* Backported in [#70068](https://github.com/ClickHouse/ClickHouse/issues/70068): Fixes `Block structure mismatch` for queries with nested views and `WHERE` condition. Fixes [#66209](https://github.com/ClickHouse/ClickHouse/issues/66209). [#70054](https://github.com/ClickHouse/ClickHouse/pull/70054) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
+* Backported in [#70166](https://github.com/ClickHouse/ClickHouse/issues/70166): Fix wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)).
+* Backported in [#70236](https://github.com/ClickHouse/ClickHouse/issues/70236): Check for Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent tables with such data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)).
+* Backported in [#70203](https://github.com/ClickHouse/ClickHouse/issues/70203): Fix wrong result with skipping index. [#70127](https://github.com/ClickHouse/ClickHouse/pull/70127) ([Raúl Marín](https://github.com/Algunenano)).
+* Backported in [#70183](https://github.com/ClickHouse/ClickHouse/issues/70183): Fix data race in ColumnObject/ColumnTuple decompress method that could lead to heap use after free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||
* Backported in [#70251](https://github.com/ClickHouse/ClickHouse/issues/70251): Fix possible hung in ALTER COLUMN with Dynamic type. [#70144](https://github.com/ClickHouse/ClickHouse/pull/70144) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||
* Backported in [#70228](https://github.com/ClickHouse/ClickHouse/issues/70228): Use correct `max_types` parameter during Dynamic type creation for JSON subcolumn. [#70147](https://github.com/ClickHouse/ClickHouse/pull/70147) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||
* Backported in [#70243](https://github.com/ClickHouse/ClickHouse/issues/70243): Fix the password being displayed in `system.query_log` for users with bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)).
|
||||
* Backported in [#70432](https://github.com/ClickHouse/ClickHouse/issues/70432): Fix possible crash in JSON column. [#70172](https://github.com/ClickHouse/ClickHouse/pull/70172) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||
* Backported in [#70307](https://github.com/ClickHouse/ClickHouse/issues/70307): Fix multiple issues with arrayMin and arrayMax. [#70207](https://github.com/ClickHouse/ClickHouse/pull/70207) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
* Backported in [#70274](https://github.com/ClickHouse/ClickHouse/issues/70274): Respect setting allow_simdjson in JSON type parser. [#70218](https://github.com/ClickHouse/ClickHouse/pull/70218) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||
* Backported in [#70345](https://github.com/ClickHouse/ClickHouse/issues/70345): Don't modify global settings with startup scripts. Previously, changing a setting in a startup script would change it globally. [#70310](https://github.com/ClickHouse/ClickHouse/pull/70310) ([Antonio Andelic](https://github.com/antonio2368)).
|
||||
* Backported in [#70426](https://github.com/ClickHouse/ClickHouse/issues/70426): Fix ALTER of Dynamic type with reducing max_types parameter that could lead to server crash. [#70328](https://github.com/ClickHouse/ClickHouse/pull/70328) ([Pavel Kruglov](https://github.com/Avogar)).
|
||||
* Backported in [#70371](https://github.com/ClickHouse/ClickHouse/issues/70371): Fix crash when using WITH FILL incorrectly. [#70338](https://github.com/ClickHouse/ClickHouse/pull/70338) ([Raúl Marín](https://github.com/Algunenano)).
|
||||
|
||||
#### NO CL ENTRY
|
||||
|
||||
* NO CL ENTRY: 'Revert "Backport [#70146](https://github.com/ClickHouse/ClickHouse/issues/70146) to 24.8: Upgrade integration-runner image"'. [#70324](https://github.com/ClickHouse/ClickHouse/pull/70324) ([Max K.](https://github.com/maxknv)).
|
||||
|
||||
#### NOT FOR CHANGELOG / INSIGNIFICANT
|
||||
|
||||
* Backported in [#69961](https://github.com/ClickHouse/ClickHouse/issues/69961): Output an operation error for ZK Multi request failed operation into log. [#68127](https://github.com/ClickHouse/ClickHouse/pull/68127) ([Aleksei Filatov](https://github.com/aalexfvk)).
|
||||
* Backported in [#69491](https://github.com/ClickHouse/ClickHouse/issues/69491): Fix test_role & test_keeper_s3_snapshot integration tests. [#69013](https://github.com/ClickHouse/ClickHouse/pull/69013) ([Shankar](https://github.com/shiyer7474)).
|
||||
* Backported in [#69953](https://github.com/ClickHouse/ClickHouse/issues/69953): Remove stale moving parts without zookeeper. [#69075](https://github.com/ClickHouse/ClickHouse/pull/69075) ([Kirill](https://github.com/kirillgarbar)).
|
||||
* Backported in [#69353](https://github.com/ClickHouse/ClickHouse/issues/69353): Fix: Not-ready Set with parallel replicas. [#69264](https://github.com/ClickHouse/ClickHouse/pull/69264) ([Igor Nikonov](https://github.com/devcrafter)).
|
||||
* Backported in [#69427](https://github.com/ClickHouse/ClickHouse/issues/69427): Fix 24.8 setting compatibility `rows_before_aggregation`. [#69394](https://github.com/ClickHouse/ClickHouse/pull/69394) ([Nikita Fomichev](https://github.com/fm4v)).
|
||||
* Backported in [#69689](https://github.com/ClickHouse/ClickHouse/issues/69689): Add function `kill_ci_runner`. Kill runner when pre-pull failed. [#69557](https://github.com/ClickHouse/ClickHouse/pull/69557) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Backported in [#69639](https://github.com/ClickHouse/ClickHouse/issues/69639): Add more contexts to the debug action and use it broadly. [#69599](https://github.com/ClickHouse/ClickHouse/pull/69599) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Backported in [#69721](https://github.com/ClickHouse/ClickHouse/issues/69721): Prohibit `ALTER TABLE ... ADD INDEX ... TYPE` inverted if setting = 0. [#69684](https://github.com/ClickHouse/ClickHouse/pull/69684) ([Robert Schulze](https://github.com/rschu1ze)).
|
||||
* Backported in [#69972](https://github.com/ClickHouse/ClickHouse/issues/69972): S3Queue: support having deprecated settings to not fail server startup. [#69769](https://github.com/ClickHouse/ClickHouse/pull/69769) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Backported in [#70283](https://github.com/ClickHouse/ClickHouse/issues/70283): Improve pipdeptree generator for docker images. - Update requirements.txt for the integration tests runner container - Remove some small dependencies, improve `helpers/retry_decorator.py` - Upgrade docker-compose from EOL version 1 to version 2. [#70146](https://github.com/ClickHouse/ClickHouse/pull/70146) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
|
||||
* Backported in [#70260](https://github.com/ClickHouse/ClickHouse/issues/70260): Update test_storage_s3_queue/test.py. [#70159](https://github.com/ClickHouse/ClickHouse/pull/70159) ([Kseniia Sumarokova](https://github.com/kssenii)).
|
||||
* Backported in [#70314](https://github.com/ClickHouse/ClickHouse/issues/70314): CI: Remove await feature from release branches. [#70294](https://github.com/ClickHouse/ClickHouse/pull/70294) ([Max K.](https://github.com/maxknv)).
|
||||
* Backported in [#70380](https://github.com/ClickHouse/ClickHouse/issues/70380): Fix tiny mistake, responsible for some of kafka test flaps. Example [report](https://s3.amazonaws.com/clickhouse-test-reports/0/3198aafac59c368993e7b5f49d95674cc1b1be18/integration_tests__release__[2_4].html). [#70352](https://github.com/ClickHouse/ClickHouse/pull/70352) ([filimonov](https://github.com/filimonov)).
|
||||
* Backported in [#70405](https://github.com/ClickHouse/ClickHouse/issues/70405): Closes [#69634](https://github.com/ClickHouse/ClickHouse/issues/69634). [#70354](https://github.com/ClickHouse/ClickHouse/pull/70354) ([pufit](https://github.com/pufit)).
|
||||
|
33 docs/changelogs/v24.9.2.42-stable.md Normal file
@ -0,0 +1,33 @@
---
sidebar_position: 1
sidebar_label: 2024
---

# 2024 Changelog

### ClickHouse release v24.9.2.42-stable (de7c791a2ea) FIXME as compared to v24.9.1.3278-stable (6d058d82a8e)

#### Improvement
* Backported in [#70091](https://github.com/ClickHouse/ClickHouse/issues/70091): Add `show_create_query_identifier_quoting_rule` to define the identifier quoting behavior of the SHOW CREATE query result. Possible values: - `user_display`: When the identifier is a keyword. - `when_necessary`: When the identifier is one of `{"distinct", "all", "table"}`, or when it can cause ambiguity: column names, dictionary attribute names. - `always`: Always quote identifiers. [#69448](https://github.com/ClickHouse/ClickHouse/pull/69448) ([tuanpach](https://github.com/tuanpach)).
* Backported in [#70100](https://github.com/ClickHouse/ClickHouse/issues/70100): Follow-up to https://github.com/ClickHouse/ClickHouse/pull/69346. Point 4 described there now works as well. [#69563](https://github.com/ClickHouse/ClickHouse/pull/69563) ([Vitaly Baranov](https://github.com/vitlibar)).
* Backported in [#70048](https://github.com/ClickHouse/ClickHouse/issues/70048): Add a new column `readonly_duration` to the `system.replicas` table, needed to distinguish actual read-only replicas from sentinel ones in alerts. [#69871](https://github.com/ClickHouse/ClickHouse/pull/69871) ([Miсhael Stetsyuk](https://github.com/mstetsyuk)).

#### Bug Fix (user-visible misbehavior in an official stable release)
* Backported in [#70193](https://github.com/ClickHouse/ClickHouse/issues/70193): Fix crash when executing `create view t as (with recursive 42 as ttt select ttt);`. [#69676](https://github.com/ClickHouse/ClickHouse/pull/69676) ([Han Fei](https://github.com/hanfei1991)).
* Backported in [#70083](https://github.com/ClickHouse/ClickHouse/issues/70083): Closes [#69752](https://github.com/ClickHouse/ClickHouse/issues/69752). [#69985](https://github.com/ClickHouse/ClickHouse/pull/69985) ([pufit](https://github.com/pufit)).
* Backported in [#70070](https://github.com/ClickHouse/ClickHouse/issues/70070): Fixes `Block structure mismatch` for queries with nested views and a `WHERE` condition. Fixes [#66209](https://github.com/ClickHouse/ClickHouse/issues/66209). [#70054](https://github.com/ClickHouse/ClickHouse/pull/70054) ([Nikolai Kochetov](https://github.com/KochetovNicolai)).
* Backported in [#70168](https://github.com/ClickHouse/ClickHouse/issues/70168): Fix a wrong LOGICAL_ERROR when replacing literals in ranges. [#70122](https://github.com/ClickHouse/ClickHouse/pull/70122) ([Pablo Marcos](https://github.com/pamarcos)).
* Backported in [#70238](https://github.com/ClickHouse/ClickHouse/issues/70238): Check for the Nullable(Nothing) type during ALTER TABLE MODIFY COLUMN/QUERY to prevent creating tables with such a data type. [#70123](https://github.com/ClickHouse/ClickHouse/pull/70123) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70205](https://github.com/ClickHouse/ClickHouse/issues/70205): Fix a wrong result with a skipping index. [#70127](https://github.com/ClickHouse/ClickHouse/pull/70127) ([Raúl Marín](https://github.com/Algunenano)).
* Backported in [#70185](https://github.com/ClickHouse/ClickHouse/issues/70185): Fix a data race in the ColumnObject/ColumnTuple decompress method that could lead to a heap use-after-free. [#70137](https://github.com/ClickHouse/ClickHouse/pull/70137) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70253](https://github.com/ClickHouse/ClickHouse/issues/70253): Fix a possible hang in ALTER COLUMN with the Dynamic type. [#70144](https://github.com/ClickHouse/ClickHouse/pull/70144) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70230](https://github.com/ClickHouse/ClickHouse/issues/70230): Use the correct `max_types` parameter during Dynamic type creation for a JSON subcolumn. [#70147](https://github.com/ClickHouse/ClickHouse/pull/70147) ([Pavel Kruglov](https://github.com/Avogar)).
* Backported in [#70217](https://github.com/ClickHouse/ClickHouse/issues/70217): Fix the password being displayed in `system.query_log` for users with the bcrypt password authentication method. [#70148](https://github.com/ClickHouse/ClickHouse/pull/70148) ([Nikolay Degterinsky](https://github.com/evillique)).
* Backported in [#70267](https://github.com/ClickHouse/ClickHouse/issues/70267): Respect the setting `allow_simdjson` in the JSON type parser. [#70218](https://github.com/ClickHouse/ClickHouse/pull/70218) ([Pavel Kruglov](https://github.com/Avogar)).

#### NOT FOR CHANGELOG / INSIGNIFICANT

* Backported in [#70052](https://github.com/ClickHouse/ClickHouse/issues/70052): Improve the stateless test runner. [#69864](https://github.com/ClickHouse/ClickHouse/pull/69864) ([Alexey Katsman](https://github.com/alexkats)).
* Backported in [#70284](https://github.com/ClickHouse/ClickHouse/issues/70284): Improve the pipdeptree generator for docker images: update requirements.txt for the integration tests runner container, remove some small dependencies, improve `helpers/retry_decorator.py`, and upgrade docker-compose from EOL version 1 to version 2. [#70146](https://github.com/ClickHouse/ClickHouse/pull/70146) ([Mikhail f. Shiryaev](https://github.com/Felixoid)).
* Backported in [#70261](https://github.com/ClickHouse/ClickHouse/issues/70261): Update test_storage_s3_queue/test.py. [#70159](https://github.com/ClickHouse/ClickHouse/pull/70159) ([Kseniia Sumarokova](https://github.com/kssenii)).
@ -82,7 +82,7 @@ cd ./utils/check-style

# Check duplicate includes
./check-duplicate-includes.sh

# Check c++ formatiing
# Check c++ formatting
./check-style

# Check python formatting with black
@ -63,7 +63,34 @@ Currently there are 3 ways to authenticate:
- `SAS Token` - Can be used by providing an `endpoint`, `connection_string` or `storage_account_url`. It is identified by the presence of '?' in the URL.
- `Workload Identity` - Can be used by providing an `endpoint` or `storage_account_url`. If the `use_workload_identity` parameter is set in the config, ([workload identity](https://github.com/Azure/azure-sdk-for-cpp/tree/main/sdk/identity/azure-identity#authenticate-azure-hosted-applications)) is used for authentication.

### Data cache {#data-cache}

`Azure` table engine supports data caching on local disk.
See filesystem cache configuration options and usage in this [section](/docs/en/operations/storing-data.md/#using-local-cache).
The cache is keyed on the path and ETag of the storage object, so ClickHouse will not read a stale cached version.

To enable caching, use the settings `filesystem_cache_name = '<name>'` and `enable_filesystem_cache = 1`.

```sql
SELECT *
FROM azureBlobStorage('DefaultEndpointsProtocol=http;AccountName=devstoreaccount1;AccountKey=Eby8vdM02xNOcqFlqUwJPLlmEtlCDXJ1OUzFT50uSRZ6IFsuFq2UVErCz4I6tq/K1SZFPTOtr/KBHBeksoGMGw==;BlobEndpoint=http://azurite1:10000/devstoreaccount1/;', 'test_container', 'test_table', 'CSV')
SETTINGS filesystem_cache_name = 'cache_for_azure', enable_filesystem_cache = 1;
```

There are two ways to define the cache in the configuration file.

1. Add the following section to the ClickHouse configuration file:

``` xml
<clickhouse>
    <filesystem_caches>
        <cache_for_azure>
            <path>path to cache directory</path>
            <max_size>10Gi</max_size>
        </cache_for_azure>
    </filesystem_caches>
</clickhouse>
```

2. Reuse the cache configuration (and therefore cache storage) from the ClickHouse `storage_configuration` section, [described here](/docs/en/operations/storing-data.md/#using-local-cache).

## See also
@ -48,6 +48,10 @@ Using named collections:
CREATE TABLE deltalake ENGINE=DeltaLake(deltalake_conf, filename = 'test_table')
```

### Data cache {#data-cache}

`DeltaLake` table engine and table function support data caching the same as the `S3`, `AzureBlobStorage`, `HDFS` storages. See [here](../../../engines/table-engines/integrations/s3.md#data-cache).

## See also

- [deltaLake table function](../../../sql-reference/table-functions/deltalake.md)
@ -6,7 +6,7 @@ sidebar_label: Iceberg

# Iceberg Table Engine

This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure and locally stored tables.
This engine provides a read-only integration with existing Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure, HDFS and locally stored tables.

## Create Table

@ -19,13 +19,16 @@ CREATE TABLE iceberg_table_s3
CREATE TABLE iceberg_table_azure
    ENGINE = IcebergAzure(connection_string|storage_account_url, container_name, blobpath, [account_name, account_key, format, compression])

CREATE TABLE iceberg_table_hdfs
    ENGINE = IcebergHDFS(path_to_table [,format] [,compression_method])

CREATE TABLE iceberg_table_local
    ENGINE = IcebergLocal(path_to_table [,format] [,compression_method])
```

**Engine arguments**

Description of the arguments coincides with description of arguments in engines `S3`, `AzureBlobStorage` and `File` correspondingly.
Description of the arguments coincides with the description of arguments in the engines `S3`, `AzureBlobStorage`, `HDFS` and `File` respectively.
`format` stands for the format of data files in the Iceberg table.

Engine parameters can be specified using [Named Collections](../../../operations/named-collections.md)
@ -60,6 +63,10 @@ CREATE TABLE iceberg_table ENGINE=IcebergS3(iceberg_conf, filename = 'test_table

The table engine `Iceberg` is now an alias for `IcebergS3`.

### Data cache {#data-cache}

`Iceberg` table engine and table function support data caching the same as the `S3`, `AzureBlobStorage`, `HDFS` storages. See [here](../../../engines/table-engines/integrations/s3.md#data-cache).
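For illustration, a minimal sketch of enabling the cache for a query over the Iceberg engine, assuming a table created as in the `iceberg_table_s3` example above and a cache named `cache_for_s3` already defined in the server configuration as shown in the linked S3 section:

```sql
-- Assumes an existing Iceberg table and a configured cache named `cache_for_s3`.
SELECT count()
FROM iceberg_table_s3
SETTINGS filesystem_cache_name = 'cache_for_s3', enable_filesystem_cache = 1;
```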
## See also

- [iceberg table function](/docs/en/sql-reference/table-functions/iceberg.md)
@ -9,7 +9,7 @@ sidebar_label: MongoDB
MongoDB engine is a read-only table engine which allows reading data from a remote [MongoDB](https://www.mongodb.com/) collection.

Only MongoDB v3.6+ servers are supported.
[Seed list(`mongodb**+srv**`)](https://www.mongodb.com/docs/manual/reference/glossary/#std-term-seed-list) is not yet supported.
[Seed list(`mongodb+srv`)](https://www.mongodb.com/docs/manual/reference/glossary/#std-term-seed-list) is not yet supported.

:::note
If you run into problems, please report the issue and try using [the legacy implementation](../../../operations/server-configuration-parameters/settings.md#use_legacy_mongodb_integration).
@ -4,12 +4,8 @@ sidebar_position: 138
sidebar_label: MySQL
---

import CloudAvailableBadge from '@theme/badges/CloudAvailableBadge';

# MySQL Table Engine

<CloudAvailableBadge />

The MySQL engine allows you to perform `SELECT` and `INSERT` queries on data that is stored on a remote MySQL server.

## Creating a Table {#creating-a-table}
@ -26,6 +26,7 @@ SELECT * FROM s3_engine_table LIMIT 2;
│ two  │     2 │
└──────┴───────┘
```

## Create Table {#creating-a-table}

``` sql
@ -43,6 +44,37 @@ CREATE TABLE s3_engine_table (name String, value UInt32)
- `aws_access_key_id`, `aws_secret_access_key` - Long-term credentials for the [AWS](https://aws.amazon.com/) account user. You can use these to authenticate your requests. Parameter is optional. If credentials are not specified, they are taken from the configuration file. For more information see [Using S3 for Data Storage](../mergetree-family/mergetree.md#table_engine-mergetree-s3).
- `compression` — Compression type. Supported values: `none`, `gzip/gz`, `brotli/br`, `xz/LZMA`, `zstd/zst`. Parameter is optional. By default, it will auto-detect compression by file extension.

### Data cache {#data-cache}

`S3` table engine supports data caching on local disk.
See filesystem cache configuration options and usage in this [section](/docs/en/operations/storing-data.md/#using-local-cache).
The cache is keyed on the path and ETag of the storage object, so ClickHouse will not read a stale cached version.

To enable caching, use the settings `filesystem_cache_name = '<name>'` and `enable_filesystem_cache = 1`.

```sql
SELECT *
FROM s3('http://minio:10000/clickhouse//test_3.csv', 'minioadmin', 'minioadminpassword', 'CSV')
SETTINGS filesystem_cache_name = 'cache_for_s3', enable_filesystem_cache = 1;
```

There are two ways to define the cache in the configuration file.

1. Add the following section to the ClickHouse configuration file:

``` xml
<clickhouse>
    <filesystem_caches>
        <cache_for_s3>
            <path>path to cache directory</path>
            <max_size>10Gi</max_size>
        </cache_for_s3>
    </filesystem_caches>
</clickhouse>
```

2. Reuse the cache configuration (and therefore cache storage) from the ClickHouse `storage_configuration` section, [described here](/docs/en/operations/storing-data.md/#using-local-cache).
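After running a cached query, one way to check that the cache is being populated is the `system.filesystem_cache` table; a minimal sketch (the exact set of columns may vary between versions):

```sql
-- Count currently cached file segments; a non-zero result indicates the cache is in use.
SELECT count() FROM system.filesystem_cache;
```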
### PARTITION BY

`PARTITION BY` — Optional. In most cases you don't need a partition key, and when one is needed you generally don't need a partition key more granular than by month. Partitioning does not speed up queries (in contrast to the ORDER BY expression). You should never use partitioning that is too granular. Don't partition your data by client identifiers or names (instead, make the client identifier or name the first column in the ORDER BY expression).
@ -374,15 +374,15 @@ Users can create [UDF](/docs/en/sql-reference/statements/create/function.md) to
```sql
CREATE FUNCTION bfEstimateFunctions [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_nubmer_of_all_grams) * log(2));
(total_number_of_all_grams, size_of_bloom_filter_in_bits) -> round((size_of_bloom_filter_in_bits / total_number_of_all_grams) * log(2));

CREATE FUNCTION bfEstimateBmSize [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, probability_of_false_positives) -> ceil((total_nubmer_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2))));
(total_number_of_all_grams, probability_of_false_positives) -> ceil((total_number_of_all_grams * log(probability_of_false_positives)) / log(1 / pow(2, log(2))));

CREATE FUNCTION bfEstimateFalsePositive [ON CLUSTER cluster]
AS
(total_nubmer_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_nubmer_of_all_grams)), number_of_hash_functions);
(total_number_of_all_grams, number_of_hash_functions, size_of_bloom_filter_in_bytes) -> pow(1 - exp(-number_of_hash_functions/ (size_of_bloom_filter_in_bytes / total_number_of_all_grams)), number_of_hash_functions);

CREATE FUNCTION bfEstimateGramNumber [ON CLUSTER cluster]
AS
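Once created, these estimator UDFs can be called like ordinary functions. A sketch with hypothetical sizing numbers (an index over roughly 4300 n-grams and a 32 KiB filter):

```sql
-- Estimate the optimal number of hash functions for 4300 n-grams
-- and a bloom filter of 32768 bytes (262144 bits).
SELECT bfEstimateFunctions(4300, 32768 * 8) AS number_of_hash_functions;
```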
@ -35,7 +35,7 @@ Engine parameters:

- `root_path` - ZooKeeper path where the `table_name` will be stored.
This path should not contain the prefix defined by the `<keeper_map_path_prefix>` config because the prefix will be automatically appended to the `root_path`.
Additionally, format of `auxiliary_zookeper_cluster_name:/some/path` is also supported where `auxiliary_zookeper_cluster` is a ZooKeeper cluster defined inside `<auxiliary_zookeepers>` config.
Additionally, the format `auxiliary_zookeeper_cluster_name:/some/path` is also supported, where `auxiliary_zookeeper_cluster` is a ZooKeeper cluster defined inside the `<auxiliary_zookeepers>` config.
By default, the ZooKeeper cluster defined inside the `<zookeeper>` config is used.
- `keys_limit` - number of keys allowed inside the table.
This is a soft limit, and in some edge cases more keys may end up in the table.
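A minimal sketch of the auxiliary-cluster form, assuming a cluster named `zookeeper2` is defined inside `<auxiliary_zookeepers>` in the server configuration (table and path names are hypothetical):

```sql
-- The path `/kv` is resolved against the `zookeeper2` cluster
-- and prefixed with <keeper_map_path_prefix> automatically.
CREATE TABLE kv (key String, value UInt64)
ENGINE = KeeperMap('zookeeper2:/kv', 10000)
PRIMARY KEY key;
```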
@ -195,6 +195,9 @@ You can pass parameters to `clickhouse-client` (all parameters have a default va
- `--print-profile-events` – Print `ProfileEvents` packets.
- `--profile-events-delay-ms` – Delay between printing `ProfileEvents` packets (-1 - print only totals, 0 - print every single packet).
- `--jwt` – If specified, enables authorization via JSON Web Token. Server JWT authorization is available only in ClickHouse Cloud.
- `--progress` – Print the progress of query execution. Possible values: 'tty|on|1|true|yes' - outputs to TTY in interactive mode; 'err' - outputs to STDERR in non-interactive mode; 'off|0|false|no' - disables progress printing. Default: TTY in interactive mode, disabled in non-interactive mode.
- `--progress-table` – Print a progress table with changing metrics during query execution. Possible values: 'tty|on|1|true|yes' - outputs to TTY in interactive mode; 'err' - outputs to STDERR in non-interactive mode; 'off|0|false|no' - disables the progress table. Default: TTY in interactive mode, disabled in non-interactive mode.
- `--enable-progress-table-toggle` – Enable toggling of the progress table by pressing the control key (Space). Only applicable in interactive mode with the progress table printing enabled. Default: 'true'.

Instead of the `--host`, `--port`, `--user` and `--password` options, ClickHouse client also supports connection strings (see next section).
@ -877,7 +877,7 @@ INSERT INTO json_as_object (json) FORMAT JSONAsObject {"any json stucture":1}
SELECT time, json FROM json_as_object FORMAT JSONEachRow
```

```resonse
```response
{"time":"2024-09-16 12:18:10","json":{}}
{"time":"2024-09-16 12:18:13","json":{"any json stucture":"1"}}
{"time":"2024-09-16 12:18:08","json":{"foo":{"bar":{"x":"y"},"baz":"1"}}}
@ -1598,10 +1598,6 @@ the columns from input data will be mapped to the columns from the table by thei
Otherwise, the first row will be skipped.
If the setting [input_format_with_types_use_header](/docs/en/operations/settings/settings-formats.md/#input_format_with_types_use_header) is set to 1,
the types from the input data will be compared with the types of the corresponding columns from the table. Otherwise, the second row will be skipped.
If the setting [output_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#output_format_binary_encode_types_in_binary_format) is set to 1,
the types in the header will be written using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in the RowBinaryWithNamesAndTypes output format.
If the setting [input_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#input_format_binary_encode_types_in_binary_format) is set to 1,
the types in the header will be read using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in the RowBinaryWithNamesAndTypes input format.
:::

## RowBinaryWithDefaults {#rowbinarywithdefaults}
@ -1624,6 +1620,10 @@ For column `y` data starts with byte `00` that indicates that column has actual
## RowBinary format settings {#row-binary-format-settings}

- [format_binary_max_string_size](/docs/en/operations/settings/settings-formats.md/#format_binary_max_string_size) - The maximum allowed size for String in RowBinary format. Default value - `1GiB`.
- [output_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#output_format_binary_encode_types_in_binary_format) - Allows writing types in the header using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in the RowBinaryWithNamesAndTypes output format. Default value - `false`.
- [input_format_binary_encode_types_in_binary_format](/docs/en/operations/settings/settings-formats.md/#input_format_binary_encode_types_in_binary_format) - Allows reading types in the header using [binary encoding](/docs/en/sql-reference/data-types/data-types-binary-encoding.md) instead of strings with type names in the RowBinaryWithNamesAndTypes input format. Default value - `false`.
- [output_format_binary_write_json_as_string](/docs/en/operations/settings/settings-formats.md/#output_format_binary_write_json_as_string) - Allows writing values of the [JSON](/docs/en/sql-reference/data-types/newjson.md) data type as JSON [String](/docs/en/sql-reference/data-types/string.md) values in the RowBinary output format. Default value - `false`.
- [input_format_binary_read_json_as_string](/docs/en/operations/settings/settings-formats.md/#input_format_binary_read_json_as_string) - Allows reading values of the [JSON](/docs/en/sql-reference/data-types/newjson.md) data type as JSON [String](/docs/en/sql-reference/data-types/string.md) values in the RowBinary input format. Default value - `false`.
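As a sketch of how these settings combine, the following writes a `RowBinaryWithNamesAndTypes` header with binary-encoded types instead of type-name strings:

```sql
-- The header of the output will contain binary type encodings instead of type names.
SELECT number AS id, toString(number) AS s
FROM numbers(3)
SETTINGS output_format_binary_encode_types_in_binary_format = 1
FORMAT RowBinaryWithNamesAndTypes;
```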
## Values {#data-format-values}
@ -509,7 +509,7 @@ DESC format(JSONEachRow, $$
{"value" : "424242424242"}
$$)
```
```reponse
```response
┌─name──┬─type────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ value │ Nullable(Int64) │              │                    │         │                  │                │
└───────┴─────────────────┴──────────────┴────────────────────┴─────────┴──────────────────┴────────────────┘
@ -910,9 +910,9 @@ This setting is disabled by default.

```sql
SET input_format_json_try_infer_numbers_from_strings = 1;
DESC format(CSV, '"42","42.42"');
DESC format(CSV, '42,42.42');
```
```reponse
```response
┌─name─┬─type──────────────┬─default_type─┬─default_expression─┬─comment─┬─codec_expression─┬─ttl_expression─┐
│ c1   │ Nullable(Int64)   │              │                    │         │                  │                │
│ c2   │ Nullable(Float64) │              │                    │         │                  │                │
13 docs/en/interfaces/third-party/gui.md vendored
@ -19,6 +19,19 @@ Features:
- Performance Optimizations: Utilizes Indexed DB for efficient caching and state management.
- Local Data Storage: All data is stored locally in the browser, ensuring no data is sent anywhere else.

### ChartDB {#chartdb}

[ChartDB](https://chartdb.io) is a free and open-source tool for visualizing and designing database schemas, including ClickHouse, with a single query. Built with React, it provides a seamless and user-friendly experience, requiring no database credentials or signup to get started.

Features:

- Schema Visualization: Instantly import and visualize your ClickHouse schema, including ER diagrams with materialized views and standard views, showing references to tables.
- AI-Powered DDL Export: Generate DDL scripts effortlessly for better schema management and documentation.
- Multi-SQL Dialect Support: Compatible with a range of SQL dialects, making it versatile for various database environments.
- No Signup or Credentials Needed: All functionality is accessible directly in the browser, keeping it frictionless and secure.

[ChartDB Source Code](https://github.com/chartdb/chartdb).

### Tabix {#tabix}

Web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) project.
@ -31,6 +31,10 @@ The table must be enabled in the server configuration, see the `opentelemetry_sp

The tags or attributes are saved as two parallel arrays, containing the keys and values. Use [ARRAY JOIN](../sql-reference/statements/select/array-join.md) to work with them.

## Log-query-settings

ClickHouse allows you to log changes to query settings during query execution. When enabled, any modifications made to query settings will be recorded in the OpenTelemetry span log. This feature is particularly useful in production environments for tracking configuration changes that may affect query performance.
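For example, recorded spans (including logged setting changes, when enabled) can be inspected through the span log table mentioned above; a sketch, assuming the log table is enabled in the server configuration (the exact column layout varies between versions):

```sql
-- Show the most recently finished spans.
SELECT operation_name, finish_date
FROM system.opentelemetry_span_log
ORDER BY finish_date DESC
LIMIT 10;
```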
## Integration with monitoring systems

At the moment, there is no ready-made tool that can export the tracing data from ClickHouse to a monitoring system.
@ -1057,11 +1057,39 @@ Default value: throw

## deduplicate_merge_projection_mode

Whether to allow create projection for the table with non-classic MergeTree, that is not (Replicated, Shared) MergeTree. If allowed, what is the action when merge projections, either drop or rebuild. So classic MergeTree would ignore this setting.
Whether to allow creating projections for tables with a non-classic MergeTree, that is, not a (Replicated, Shared) MergeTree. The `ignore` option exists purely for compatibility and may result in incorrect answers. Otherwise, if allowed, the setting defines the action taken when merging projections: either drop or rebuild. Classic MergeTree ignores this setting.
It also controls `OPTIMIZE DEDUPLICATE`, and affects all MergeTree family members. Similar to the option `lightweight_mutation_projection_mode`, it is also part-level.

Possible values:

- throw, drop, rebuild
- ignore, throw, drop, rebuild

Default value: throw
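A sketch of setting this at table creation time for a non-classic engine (the table is hypothetical):

```sql
-- Allow projections on a ReplacingMergeTree table; rebuild them when parts are merged.
CREATE TABLE events (id UInt64, ts DateTime)
ENGINE = ReplacingMergeTree
ORDER BY id
SETTINGS deduplicate_merge_projection_mode = 'rebuild';
```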
## min_free_disk_bytes_to_perform_insert

The minimum number of bytes of disk space that should be free in order to insert data. If the number of available free bytes is less than `min_free_disk_bytes_to_perform_insert`, an exception is thrown and the insert is not executed. Note that this setting:
- takes into account the `keep_free_space_bytes` setting.
- does not take into account the amount of data that will be written by the `INSERT` operation.
- is only checked if a positive (non-zero) number of bytes is specified.

Possible values:

- Any positive integer.

Default value: 0 bytes.

## min_free_disk_ratio_to_perform_insert

The minimum free-to-total disk space ratio to perform an `INSERT`. Must be a floating point value between 0 and 1. Note that this setting:
- takes into account the `keep_free_space_bytes` setting.
- does not take into account the amount of data that will be written by the `INSERT` operation.
- is only checked if a positive (non-zero) ratio is specified.

Possible values:

- Float, 0.0 - 1.0

Default value: 0.0

Note that if both `min_free_disk_ratio_to_perform_insert` and `min_free_disk_bytes_to_perform_insert` are specified, ClickHouse will use the value that allows performing inserts with the larger amount of free disk space.
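A sketch of how one of these thresholds might be applied at table creation time (the table is hypothetical; 1073741824 bytes = 1 GiB):

```sql
-- Refuse inserts into this table when less than 1 GiB of disk space is free.
CREATE TABLE t (x UInt64)
ENGINE = MergeTree
ORDER BY x
SETTINGS min_free_disk_bytes_to_perform_insert = 1073741824;
```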
@ -49,6 +49,18 @@ Default values are defined in [Settings.h](https://github.com/ClickHouse/ClickHo

See also the description of [max_memory_usage](#settings_max_memory_usage).

For example, if you want to set `max_memory_usage_for_user` to 1000 bytes for a user named `clickhouse_read`, you can use the statement

``` sql
ALTER USER clickhouse_read SETTINGS max_memory_usage_for_user = 1000;
```

You can verify it worked by logging out of your client, logging back in, and then using the `getSetting` function:

```sql
SELECT getSetting('max_memory_usage_for_user');
```

## max_rows_to_read {#max-rows-to-read}

The following restrictions can be checked on each block (instead of on each row). That is, the restrictions can be broken a little.
File diff suppressed because it is too large
@ -6,7 +6,7 @@ sidebar_label: User Settings

# Users and Roles Settings

The `users` section of the `user.xml` configuration file contains user settings.
The `users` section of the `users.xml` configuration file contains user settings.

:::note
ClickHouse also supports [SQL-driven workflow](../../guides/sre/user-management/index.md#access-control) for managing users. We recommend using it.
File diff suppressed because it is too large
@ -10,21 +10,21 @@ Columns:

- `database` ([String](../../sql-reference/data-types/string.md)) — The name of the database the table is in.
- `view` ([String](../../sql-reference/data-types/string.md)) — Table name.
- `uuid` ([UUID](../../sql-reference/data-types/uuid.md)) — Table uuid (Atomic database).
- `status` ([String](../../sql-reference/data-types/string.md)) — Current state of the refresh.
- `last_refresh_result` ([String](../../sql-reference/data-types/string.md)) — Outcome of the latest refresh attempt.
- `last_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the last refresh attempt. `NULL` if no refresh attempts happened since server startup or table creation.
- `last_success_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time of the last successful refresh. `NULL` if no successful refreshes happened since server startup or table creation.
- `duration_ms` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How long the last refresh attempt took.
- `next_refresh_time` ([DateTime](../../sql-reference/data-types/datetime.md)) — Time at which the next refresh is scheduled to start.
- `remaining_dependencies` ([Array(String)](../../sql-reference/data-types/array.md)) — If the view has [refresh dependencies](../../sql-reference/statements/create/view.md#refresh-dependencies), this array contains the subset of those dependencies that are not satisfied for the current refresh yet. If `status = 'WaitingForDependencies'`, a refresh is ready to start as soon as these dependencies are fulfilled.
- `exception` ([String](../../sql-reference/data-types/string.md)) — if `last_refresh_result = 'Error'`, i.e. the last refresh attempt failed, this column contains the corresponding error message and stack trace.
- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — If nonzero, the current or next refresh is a retry (see `refresh_retries` refresh setting), and `retry` is the 1-based index of that retry.
- `refresh_count` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of successful refreshes since last server restart or table creation.
- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far.
- `total_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Estimated total number of rows that need to be read by the current refresh.

(There are additional columns related to current refresh progress, but they are currently unreliable.)
- `last_success_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time when the latest successful refresh started. NULL if no successful refreshes happened since server startup or table creation.
- `last_success_duration_ms` ([Nullable](../../sql-reference/data-types/nullable.md)([UInt64](../../sql-reference/data-types/int-uint.md))) — How long the latest refresh took.
- `last_refresh_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time when the latest refresh attempt finished (if known) or started (if unknown or still running). NULL if no refresh attempts happened since server startup or table creation.
- `last_refresh_replica` ([String](../../sql-reference/data-types/string.md)) — If coordination is enabled, name of the replica that made the current (if running) or previous (if not running) refresh attempt.
- `next_refresh_time` ([Nullable](../../sql-reference/data-types/nullable.md)([DateTime](../../sql-reference/data-types/datetime.md))) — Time at which the next refresh is scheduled to start, if status = Scheduled.
- `exception` ([String](../../sql-reference/data-types/string.md)) — Error message from the previous attempt if it failed.
- `retry` ([UInt64](../../sql-reference/data-types/int-uint.md)) — How many failed attempts there have been so far for the current refresh.
- `progress` ([Float64](../../sql-reference/data-types/float.md)) — Progress of the current refresh, between 0 and 1. Not available if status is `RunningOnAnotherReplica`.
- `read_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows read by the current refresh so far. Not available if status is `RunningOnAnotherReplica`.
- `read_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of bytes read during the current refresh. Not available if status is `RunningOnAnotherReplica`.
- `total_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Estimated total number of rows that need to be read by the current refresh. Not available if status is `RunningOnAnotherReplica`.
- `written_rows` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of rows written during the current refresh. Not available if status is `RunningOnAnotherReplica`.
- `written_bytes` ([UInt64](../../sql-reference/data-types/int-uint.md)) — Number of bytes written during the current refresh. Not available if status is `RunningOnAnotherReplica`.

**Example**
@ -177,6 +177,26 @@ When you are ready to insert your files into ClickHouse, startup a ClickHouse se
:::

## Format Conversions

You can use `clickhouse-local` for converting data between different formats. Example:

``` bash
$ clickhouse-local --input-format JSONLines --output-format CSV --query "SELECT * FROM table" < data.json > data.csv
```

Formats are auto-detected from file extensions:

``` bash
$ clickhouse-local --query "SELECT * FROM table" < data.json > data.csv
```

As a shortcut, you can write it using the `--copy` argument:
``` bash
$ clickhouse-local --copy < data.json > data.csv
```

## Usage {#usage}

By default `clickhouse-local` has access to data of a ClickHouse server on the same host, and it does not depend on the server's configuration. It also supports loading server configuration using the `--config-file` argument. For temporary data, a unique temporary data directory is created by default.
@ -124,7 +124,7 @@ Converts an aggregate function for tables into an aggregate function for arrays
## -Distinct

Every unique combination of arguments will be aggregated only once. Repeating values are ignored.
Examples: `sum(DISTINCT x)`, `groupArray(DISTINCT x)`, `corrStableDistinct(DISTINCT x, y)` and so on.
Examples: `sum(DISTINCT x)` (or `sumDistinct(x)`), `groupArray(DISTINCT x)` (or `groupArrayDistinct(x)`), `corrStable(DISTINCT x, y)` (or `corrStableDistinct(x, y)`) and so on.

## -OrDefault
@ -261,9 +261,10 @@ windowFunnel(window, [mode, [mode, ... ]])(timestamp, cond1, cond2, ..., condN)

- `window` — Length of the sliding window; it is the time interval between the first and the last condition. The unit of `window` depends on the `timestamp` itself and varies. Determined using the expression `timestamp of cond1 <= timestamp of cond2 <= ... <= timestamp of condN <= timestamp of cond1 + window`.
- `mode` — An optional argument. One or more modes can be set.
    - `'strict_deduplication'` — If the same condition holds for the sequence of events, then such repeating event interrupts further processing.
    - `'strict_deduplication'` — If the same condition holds for the sequence of events, then such a repeating event interrupts further processing. Note: it may work unexpectedly if several conditions hold for the same event.
    - `'strict_order'` — Don't allow interventions of other events. E.g. in the case of `A->B->D->C`, it stops finding `A->B->C` at the `D` and the max event level is 2.
    - `'strict_increase'` — Apply conditions only to events with strictly increasing timestamps.
    - `'strict_once'` — Count each event only once in the chain even if it meets the condition several times.
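To illustrate how these modes are passed, a sketch over a hypothetical `events` table with a one-hour window:

```sql
-- Longest matched chain of view -> cart -> purchase per user,
-- requiring strict ordering and counting each event only once.
SELECT user_id,
       windowFunnel(3600, 'strict_order', 'strict_once')(ts, event = 'view', event = 'cart', event = 'purchase') AS level
FROM events
GROUP BY user_id;
```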
**Returned value**
@ -490,7 +491,7 @@ Where:

## uniqUpTo(N)(x)

Calculates the number of different values of the argument up to a specified limit, `N`. If the number of different argument values is greater than `N`, this function returns `N` + 1, otherwise it calculates the exact value.

Recommended for use with small `N`s, up to 10. The maximum value of `N` is 100.
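A sketch over a hypothetical `visits` table:

```sql
-- Returns the exact count of distinct user IDs if it is at most 10, and 11 otherwise.
SELECT uniqUpTo(10)(user_id) FROM visits;
```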
@ -522,7 +523,7 @@ This function behaves the same as [sumMap](../../sql-reference/aggregate-functio
- `keys`: [Array](../data-types/array.md) of keys.
- `values`: [Array](../data-types/array.md) of values.

**Returned Value**

- Returns a tuple of two arrays: keys in sorted order, and values summed for the corresponding keys.

@ -539,10 +540,10 @@ CREATE TABLE sum_map
)
ENGINE = Log

INSERT INTO sum_map VALUES
    ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]),
    ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]),
    ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]),
    ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]);
```

@ -572,7 +573,7 @@ This function behaves the same as [sumMap](../../sql-reference/aggregate-functio
- `keys`: [Array](../data-types/array.md) of keys.
- `values`: [Array](../data-types/array.md) of values.

**Returned Value**

- Returns a tuple of two arrays: keys in sorted order, and values summed for the corresponding keys.

@ -591,10 +592,10 @@ CREATE TABLE sum_map
)
ENGINE = Log

INSERT INTO sum_map VALUES
    ('2000-01-01', '2000-01-01 00:00:00', [1, 2, 3], [10, 10, 10]),
    ('2000-01-01', '2000-01-01 00:00:00', [3, 4, 5], [10, 10, 10]),
    ('2000-01-01', '2000-01-01 00:01:00', [4, 5, 6], [10, 10, 10]),
    ('2000-01-01', '2000-01-01 00:01:00', [6, 7, 8], [10, 10, 10]);
```
@ -1,190 +0,0 @@
---
slug: /en/sql-reference/ansi
sidebar_position: 40
sidebar_label: ANSI Compatibility
title: "ANSI SQL Compatibility of ClickHouse SQL Dialect"
---

:::note
This article relies on Table 38, “Feature taxonomy and definition for mandatory features”, Annex F of [ISO/IEC CD 9075-2:2011](https://www.iso.org/obp/ui/#iso:std:iso-iec:9075:-2:ed-4:v1:en:sec:8).
:::

## Differences in Behaviour

The following table lists cases when a query feature works in ClickHouse but does not behave as specified in ANSI SQL.

| Feature ID | Feature Name | Difference |
|------------|-----------------------------|-----------------------------------------------------------------------------------------------------------|
| E011 | Numeric data types | Numeric literal with period is interpreted as approximate (`Float64`) instead of exact (`Decimal`) |
| E051-05 | Select items can be renamed | Item renames have a wider visibility scope than just the SELECT result |
| E141-01 | NOT NULL constraints | `NOT NULL` is implied for table columns by default |
| E011-04 | Arithmetic operators | ClickHouse overflows instead of checked arithmetic and changes the result data type based on custom rules |

## Feature Status

| Feature ID | Feature Name | Status | Comment |
|------------|--------------|--------|---------|
| **E011** | **Numeric data types** | <span class="text-warning">Partial</span> | |
| E011-01 | INTEGER and SMALLINT data types | <span class="text-success">Yes</span> | |
| E011-02 | REAL, DOUBLE PRECISION and FLOAT data types | <span class="text-success">Yes</span> | |
| E011-03 | DECIMAL and NUMERIC data types | <span class="text-success">Yes</span> | |
| E011-04 | Arithmetic operators | <span class="text-success">Yes</span> | |
| E011-05 | Numeric comparison | <span class="text-success">Yes</span> | |
| E011-06 | Implicit casting among the numeric data types | <span class="text-danger">No</span> | ANSI SQL allows arbitrary implicit cast between numeric types, while ClickHouse relies on functions having multiple overloads instead of implicit cast |
| **E021** | **Character string types** | <span class="text-warning">Partial</span> | |
| E021-01 | CHARACTER data type | <span class="text-success">Yes</span> | |
| E021-02 | CHARACTER VARYING data type | <span class="text-success">Yes</span> | |
| E021-03 | Character literals | <span class="text-success">Yes</span> | |
| E021-04 | CHARACTER_LENGTH function | <span class="text-warning">Partial</span> | No `USING` clause |
| E021-05 | OCTET_LENGTH function | <span class="text-danger">No</span> | `LENGTH` behaves similarly |
| E021-06 | SUBSTRING | <span class="text-warning">Partial</span> | No support for `SIMILAR` and `ESCAPE` clauses, no `SUBSTRING_REGEX` variant |
| E021-07 | Character concatenation | <span class="text-warning">Partial</span> | No `COLLATE` clause |
| E021-08 | UPPER and LOWER functions | <span class="text-success">Yes</span> | |
| E021-09 | TRIM function | <span class="text-success">Yes</span> | |
| E021-10 | Implicit casting among the fixed-length and variable-length character string types | <span class="text-warning">Partial</span> | ANSI SQL allows arbitrary implicit cast between string types, while ClickHouse relies on functions having multiple overloads instead of implicit cast |
| E021-11 | POSITION function | <span class="text-warning">Partial</span> | No support for `IN` and `USING` clauses, no `POSITION_REGEX` variant |
| E021-12 | Character comparison | <span class="text-success">Yes</span> | |
| **E031** | **Identifiers** | <span class="text-warning">Partial</span> | |
| E031-01 | Delimited identifiers | <span class="text-warning">Partial</span> | Unicode literal support is limited |
| E031-02 | Lower case identifiers | <span class="text-success">Yes</span> | |
| E031-03 | Trailing underscore | <span class="text-success">Yes</span> | |
| **E051** | **Basic query specification** | <span class="text-warning">Partial</span> | |
| E051-01 | SELECT DISTINCT | <span class="text-success">Yes</span> | |
| E051-02 | GROUP BY clause | <span class="text-success">Yes</span> | |
| E051-04 | GROUP BY can contain columns not in `<select list>` | <span class="text-success">Yes</span> | |
| E051-05 | Select items can be renamed | <span class="text-success">Yes</span> | |
| E051-06 | HAVING clause | <span class="text-success">Yes</span> | |
| E051-07 | Qualified \* in select list | <span class="text-success">Yes</span> | |
| E051-08 | Correlation name in the FROM clause | <span class="text-success">Yes</span> | |
| E051-09 | Rename columns in the FROM clause | <span class="text-danger">No</span> | |
| **E061** | **Basic predicates and search conditions** | <span class="text-warning">Partial</span> | |
| E061-01 | Comparison predicate | <span class="text-success">Yes</span> | |
| E061-02 | BETWEEN predicate | <span class="text-warning">Partial</span> | No `SYMMETRIC` and `ASYMMETRIC` clause |
| E061-03 | IN predicate with list of values | <span class="text-success">Yes</span> | |
| E061-04 | LIKE predicate | <span class="text-success">Yes</span> | |
| E061-05 | LIKE predicate: ESCAPE clause | <span class="text-danger">No</span> | |
| E061-06 | NULL predicate | <span class="text-success">Yes</span> | |
| E061-07 | Quantified comparison predicate | <span class="text-danger">No</span> | |
| E061-08 | EXISTS predicate | <span class="text-danger">No</span> | |
| E061-09 | Subqueries in comparison predicate | <span class="text-success">Yes</span> | |
| E061-11 | Subqueries in IN predicate | <span class="text-success">Yes</span> | |
| E061-12 | Subqueries in quantified comparison predicate | <span class="text-danger">No</span> | |
| E061-13 | Correlated subqueries | <span class="text-danger">No</span> | |
| E061-14 | Search condition | <span class="text-success">Yes</span> | |
| **E071** | **Basic query expressions** | <span class="text-warning">Partial</span> | |
| E071-01 | UNION DISTINCT table operator | <span class="text-success">Yes</span> | |
| E071-02 | UNION ALL table operator | <span class="text-success">Yes</span> | |
| E071-03 | EXCEPT DISTINCT table operator | <span class="text-danger">No</span> | |
| E071-05 | Columns combined via table operators need not have exactly the same data type | <span class="text-success">Yes</span> | |
| E071-06 | Table operators in subqueries | <span class="text-success">Yes</span> | |
| **E081** | **Basic privileges** | <span class="text-success">Yes</span> | |
| E081-01 | SELECT privilege at the table level | <span class="text-success">Yes</span> | |
| E081-02 | DELETE privilege | | |
| E081-03 | INSERT privilege at the table level | <span class="text-success">Yes</span> | |
| E081-04 | UPDATE privilege at the table level | <span class="text-success">Yes</span> | |
| E081-05 | UPDATE privilege at the column level | | |
| E081-06 | REFERENCES privilege at the table level | | |
| E081-07 | REFERENCES privilege at the column level | | |
| E081-08 | WITH GRANT OPTION | <span class="text-success">Yes</span> | |
| E081-09 | USAGE privilege | | |
| E081-10 | EXECUTE privilege | | |
| **E091** | **Set functions** | <span class="text-success">Yes</span> | |
| E091-01 | AVG | <span class="text-success">Yes</span> | |
| E091-02 | COUNT | <span class="text-success">Yes</span> | |
| E091-03 | MAX | <span class="text-success">Yes</span> | |
| E091-04 | MIN | <span class="text-success">Yes</span> | |
| E091-05 | SUM | <span class="text-success">Yes</span> | |
| E091-06 | ALL quantifier | <span class="text-success">Yes</span> | |
| E091-07 | DISTINCT quantifier | <span class="text-success">Yes</span> | Not all aggregate functions supported |
| **E101** | **Basic data manipulation** | <span class="text-warning">Partial</span> | |
| E101-01 | INSERT statement | <span class="text-success">Yes</span> | Note: primary key in ClickHouse does not imply the `UNIQUE` constraint |
| E101-03 | Searched UPDATE statement | <span class="text-warning">Partial</span> | There's an `ALTER UPDATE` statement for batch data modification |
| E101-04 | Searched DELETE statement | <span class="text-warning">Partial</span> | There's an `ALTER DELETE` statement for batch data removal |
| **E111** | **Single row SELECT statement** | <span class="text-danger">No</span> | |
| **E121** | **Basic cursor support** | <span class="text-danger">No</span> | |
| E121-01 | DECLARE CURSOR | <span class="text-danger">No</span> | |
| E121-02 | ORDER BY columns need not be in select list | <span class="text-success">Yes</span> | |
| E121-03 | Value expressions in ORDER BY clause | <span class="text-success">Yes</span> | |
| E121-04 | OPEN statement | <span class="text-danger">No</span> | |
| E121-06 | Positioned UPDATE statement | <span class="text-danger">No</span> | |
| E121-07 | Positioned DELETE statement | <span class="text-danger">No</span> | |
|
||||
| E121-08 | CLOSE statement | <span class="text-danger">No</span> | |
|
||||
| E121-10 | FETCH statement: implicit NEXT | <span class="text-danger">No</span> | |
|
||||
| E121-17 | WITH HOLD cursors | <span class="text-danger">No</span> | |
|
||||
| **E131** | **Null value support (nulls in lieu of values)** | <span class="text-success">Yes</span> | Some restrictions apply |
|
||||
| **E141** | **Basic integrity constraints** | <span class="text-warning">Partial</span> | |
|
||||
| E141-01 | NOT NULL constraints | <span class="text-success">Yes</span> | Note: `NOT NULL` is implied for table columns by default |
|
||||
| E141-02 | UNIQUE constraint of NOT NULL columns | <span class="text-danger">No</span> | |
|
||||
| E141-03 | PRIMARY KEY constraints | <span class="text-warning">Partial</span> | |
|
||||
| E141-04 | Basic FOREIGN KEY constraint with the NO ACTION default for both referential delete action and referential update action | <span class="text-danger">No</span> | |
|
||||
| E141-06 | CHECK constraint | <span class="text-success">Yes</span> | |
|
||||
| E141-07 | Column defaults | <span class="text-success">Yes</span> | |
|
||||
| E141-08 | NOT NULL inferred on PRIMARY KEY | <span class="text-success">Yes</span> | |
|
||||
| E141-10 | Names in a foreign key can be specified in any order | <span class="text-danger">No</span> | |
|
||||
| **E151** | **Transaction support** | <span class="text-danger">No</span> | |
|
||||
| E151-01 | COMMIT statement | <span class="text-danger">No</span> | |
|
||||
| E151-02 | ROLLBACK statement | <span class="text-danger">No</span> | |
|
||||
| **E152** | **Basic SET TRANSACTION statement** | <span class="text-danger">No</span> | |
|
||||
| E152-01 | SET TRANSACTION statement: ISOLATION LEVEL SERIALIZABLE clause | <span class="text-danger">No</span> | |
|
||||
| E152-02 | SET TRANSACTION statement: READ ONLY and READ WRITE clauses | <span class="text-danger">No</span> | |
|
||||
| **E153** | **Updatable queries with subqueries** | <span class="text-success">Yes</span> | |
|
||||
| **E161** | **SQL comments using leading double minus** | <span class="text-success">Yes</span> | |
|
||||
| **E171** | **SQLSTATE support** | <span class="text-danger">No</span> | |
|
||||
| **E182** | **Host language binding** | <span class="text-danger">No</span> | |
|
||||
| **F031** | **Basic schema manipulation** | <span class="text-warning">Partial</span>| |
|
||||
| F031-01 | CREATE TABLE statement to create persistent base tables | <span class="text-warning">Partial</span> | No `SYSTEM VERSIONING`, `ON COMMIT`, `GLOBAL`, `LOCAL`, `PRESERVE`, `DELETE`, `REF IS`, `WITH OPTIONS`, `UNDER`, `LIKE`, `PERIOD FOR` clauses and no support for user resolved data types |
|
||||
| F031-02 | CREATE VIEW statement | <span class="text-warning">Partial</span> | No `RECURSIVE`, `CHECK`, `UNDER`, `WITH OPTIONS` clauses and no support for user resolved data types |
|
||||
| F031-03 | GRANT statement | <span class="text-success">Yes</span> | |
|
||||
| F031-04 | ALTER TABLE statement: ADD COLUMN clause | <span class="text-success">Yes</span> | No support for `GENERATED` clause and system time period |
|
||||
| F031-13 | DROP TABLE statement: RESTRICT clause | <span class="text-danger">No</span> | |
|
||||
| F031-16 | DROP VIEW statement: RESTRICT clause | <span class="text-danger">No</span> | |
|
||||
| F031-19 | REVOKE statement: RESTRICT clause | <span class="text-danger">No</span> | |
|
||||
| **F041** | **Basic joined table** | <span class="text-warning">Partial</span> | |
|
||||
| F041-01 | Inner join (but not necessarily the INNER keyword) | <span class="text-success">Yes</span> | |
|
||||
| F041-02 | INNER keyword | <span class="text-success">Yes</span> | |
|
||||
| F041-03 | LEFT OUTER JOIN | <span class="text-success">Yes</span> | |
|
||||
| F041-04 | RIGHT OUTER JOIN | <span class="text-success">Yes</span> | |
|
||||
| F041-05 | Outer joins can be nested | <span class="text-success">Yes</span> | |
|
||||
| F041-07 | The inner table in a left or right outer join can also be used in an inner join | <span class="text-success">Yes</span> | |
|
||||
| F041-08 | All comparison operators are supported (rather than just =) | <span class="text-danger">No</span> | |
|
||||
| **F051** | **Basic date and time** | <span class="text-warning">Partial</span> | |
|
||||
| F051-01 | DATE data type (including support of DATE literal) | <span class="text-success">Yes</span> | |
|
||||
| F051-02 | TIME data type (including support of TIME literal) with fractional seconds precision of at least 0 | <span class="text-danger">No</span> | |
|
||||
| F051-03 | TIMESTAMP data type (including support of TIMESTAMP literal) with fractional seconds precision of at least 0 and 6 | <span class="text-success">Yes</span> | |
|
||||
| F051-04 | Comparison predicate on DATE, TIME, and TIMESTAMP data types | <span class="text-success">Yes</span> | |
|
||||
| F051-05 | Explicit CAST between datetime types and character string types | <span class="text-success">Yes</span> | |
|
||||
| F051-06 | CURRENT_DATE | <span class="text-danger">No</span> | `today()` is similar |
|
||||
| F051-07 | LOCALTIME | <span class="text-danger">No</span> | `now()` is similar |
|
||||
| F051-08 | LOCALTIMESTAMP | <span class="text-danger">No</span> | |
|
||||
| **F081** | **UNION and EXCEPT in views** | <span class="text-warning">Partial</span> | |
|
||||
| **F131** | **Grouped operations** | <span class="text-warning">Partial</span> | |
|
||||
| F131-01 | WHERE, GROUP BY, and HAVING clauses supported in queries with grouped views | <span class="text-success">Yes</span> | |
|
||||
| F131-02 | Multiple tables supported in queries with grouped views | <span class="text-success">Yes</span> | |
|
||||
| F131-03 | Set functions supported in queries with grouped views | <span class="text-success">Yes</span> | |
|
||||
| F131-04 | Subqueries with GROUP BY and HAVING clauses and grouped views | <span class="text-success">Yes</span> | |
|
||||
| F131-05 | Single row SELECT with GROUP BY and HAVING clauses and grouped views | <span class="text-danger">No</span> | |
|
||||
| **F181** | **Multiple module support** | <span class="text-danger">No</span> | |
|
||||
| **F201** | **CAST function** | <span class="text-success">Yes</span> | |
|
||||
| **F221** | **Explicit defaults** | <span class="text-danger">No</span> | |
|
||||
| **F261** | **CASE expression** | <span class="text-success">Yes</span> | |
|
||||
| F261-01 | Simple CASE | <span class="text-success">Yes</span> | |
|
||||
| F261-02 | Searched CASE | <span class="text-success">Yes</span> | |
|
||||
| F261-03 | NULLIF | <span class="text-success">Yes</span> | |
|
||||
| F261-04 | COALESCE | <span class="text-success">Yes</span> | |
|
||||
| **F311** | **Schema definition statement** | <span class="text-warning">Partial</span> | |
|
||||
| F311-01 | CREATE SCHEMA | <span class="text-warning">Partial</span> | See CREATE DATABASE |
|
||||
| F311-02 | CREATE TABLE for persistent base tables | <span class="text-success">Yes</span> | |
|
||||
| F311-03 | CREATE VIEW | <span class="text-success">Yes</span> | |
|
||||
| F311-04 | CREATE VIEW: WITH CHECK OPTION | <span class="text-danger">No</span> | |
|
||||
| F311-05 | GRANT statement | <span class="text-success">Yes</span> | |
|
||||
| **F471** | **Scalar subquery values** | <span class="text-success">Yes</span> | |
|
||||
| **F481** | **Expanded NULL predicate** | <span class="text-success">Yes</span> | |
|
||||
| **F812** | **Basic flagging** | <span class="text-danger">No</span> | |
|
||||
| **S011** | **Distinct data types** | | |
|
||||
| **T321** | **Basic SQL-invoked routines** | <span class="text-danger">No</span> | |
|
||||
| T321-01 | User-defined functions with no overloading | <span class="text-danger">No</span> | |
|
||||
| T321-02 | User-defined stored procedures with no overloading | <span class="text-danger">No</span> | |
|
||||
| T321-03 | Function invocation | <span class="text-danger">No</span> | |
|
||||
| T321-04 | CALL statement | <span class="text-danger">No</span> | |
|
||||
| T321-05 | RETURN statement | <span class="text-danger">No</span> | |
|
||||
| **T631** | **IN predicate with one list element** | <span class="text-success">Yes</span> | |
|
@ -86,7 +86,7 @@ The table below describes how different interval kinds of `Interval` data type a

### Aggregate function parameter binary encoding

The table below describes how parameters of `AggragateFunction` and `SimpleAggregateFunction` are encoded.
The table below describes how parameters of `AggregateFunction` and `SimpleAggregateFunction` are encoded.
The encoding of a parameter consists of 1 byte indicating the type of the parameter and the value itself.

| Parameter type | Binary encoding |
@ -106,7 +106,7 @@ The encoding of a parameter consists of 1 byte indicating the type of the parame
| `String` | `0x0C<var_uint_size><data>` |
| `Array` | `0x0D<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Tuple` | `0x0E<var_uint_size><value_encoding_1>...<value_encoding_N>` |
| `Map` | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_endoding_N><value_encoding_N>` |
| `Map` | `0x0F<var_uint_size><key_encoding_1><value_encoding_1>...<key_encoding_N><value_encoding_N>` |
| `IPv4` | `0x10<uint32_little_endian_value>` |
| `IPv6` | `0x11<uint128_little_endian_value>` |
| `UUID` | `0x12<uuid_value>` |
@ -297,99 +297,257 @@ $$)
└───────────────┴────────────────┴───────────────┴──────┴───────┴────────────┴─────────┘
```

## Comparing values of Dynamic type
## Using Dynamic type in functions

Values of `Dynamic` types are compared similar to values of `Variant` type:
Most of the functions support arguments with type `Dynamic`. In this case the function is executed separately on each internal data type stored inside the `Dynamic` column.
When the result type of the function depends on the argument types, the result of such a function executed with `Dynamic` arguments will be `Dynamic`. When the result type of the function doesn't depend on the argument types, the result will be `Nullable(T)`, where `T` is the usual result type of this function.

Examples:

```sql
CREATE TABLE test (d Dynamic) ENGINE=Memory;
INSERT INTO test VALUES (NULL), (1::Int8), (2::Int16), (3::Int32), (4::Int64);
```
```sql
SELECT d, dynamicType(d) FROM test;
```

```text
┌─d────┬─dynamicType(d)─┐
│ ᴺᵁᴸᴸ │ None │
│ 1 │ Int8 │
│ 2 │ Int16 │
│ 3 │ Int32 │
│ 4 │ Int64 │
└──────┴────────────────┘
```

```sql
SELECT d, d + 1 AS res, toTypeName(res), dynamicType(res) FROM test;
```

```text
┌─d────┬─res──┬─toTypeName(res)─┬─dynamicType(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Dynamic │ None │
│ 1 │ 2 │ Dynamic │ Int16 │
│ 2 │ 3 │ Dynamic │ Int32 │
│ 3 │ 4 │ Dynamic │ Int64 │
│ 4 │ 5 │ Dynamic │ Int64 │
└──────┴──────┴─────────────────┴──────────────────┘
```

```sql
SELECT d, d + d AS res, toTypeName(res), dynamicType(res) FROM test;
```

```text
┌─d────┬─res──┬─toTypeName(res)─┬─dynamicType(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Dynamic │ None │
│ 1 │ 2 │ Dynamic │ Int16 │
│ 2 │ 4 │ Dynamic │ Int32 │
│ 3 │ 6 │ Dynamic │ Int64 │
│ 4 │ 8 │ Dynamic │ Int64 │
└──────┴──────┴─────────────────┴──────────────────┘
```

```sql
SELECT d, d < 3 AS res, toTypeName(res) FROM test;
```

```text
┌─d────┬──res─┬─toTypeName(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(UInt8) │
│ 1 │ 1 │ Nullable(UInt8) │
│ 2 │ 1 │ Nullable(UInt8) │
│ 3 │ 0 │ Nullable(UInt8) │
│ 4 │ 0 │ Nullable(UInt8) │
└──────┴──────┴─────────────────┘
```

```sql
SELECT d, exp2(d) AS res, toTypeName(res) FROM test;
```

```text
┌─d────┬──res─┬─toTypeName(res)───┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(Float64) │
│ 1 │ 2 │ Nullable(Float64) │
│ 2 │ 4 │ Nullable(Float64) │
│ 3 │ 8 │ Nullable(Float64) │
│ 4 │ 16 │ Nullable(Float64) │
└──────┴──────┴───────────────────┘
```
```sql
TRUNCATE TABLE test;
INSERT INTO test VALUES (NULL), ('str_1'), ('str_2');
SELECT d, dynamicType(d) FROM test;
```

```text
┌─d─────┬─dynamicType(d)─┐
│ ᴺᵁᴸᴸ │ None │
│ str_1 │ String │
│ str_2 │ String │
└───────┴────────────────┘
```

```sql
SELECT d, upper(d) AS res, toTypeName(res) FROM test;
```

```text
┌─d─────┬─res───┬─toTypeName(res)──┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(String) │
│ str_1 │ STR_1 │ Nullable(String) │
│ str_2 │ STR_2 │ Nullable(String) │
└───────┴───────┴──────────────────┘
```

```sql
SELECT d, extract(d, '([0-3])') AS res, toTypeName(res) FROM test;
```

```text
┌─d─────┬─res──┬─toTypeName(res)──┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(String) │
│ str_1 │ 1 │ Nullable(String) │
│ str_2 │ 2 │ Nullable(String) │
└───────┴──────┴──────────────────┘
```

```sql
TRUNCATE TABLE test;
INSERT INTO test VALUES (NULL), ([1, 2]), ([3, 4]);
SELECT d, dynamicType(d) FROM test;
```

```text
┌─d─────┬─dynamicType(d)─┐
│ ᴺᵁᴸᴸ │ None │
│ [1,2] │ Array(Int64) │
│ [3,4] │ Array(Int64) │
└───────┴────────────────┘
```

```sql
SELECT d, d[1] AS res, toTypeName(res), dynamicType(res) FROM test;
```

```text
┌─d─────┬─res──┬─toTypeName(res)─┬─dynamicType(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Dynamic │ None │
│ [1,2] │ 1 │ Dynamic │ Int64 │
│ [3,4] │ 3 │ Dynamic │ Int64 │
└───────┴──────┴─────────────────┴──────────────────┘
```

If a function cannot be executed on some type inside a `Dynamic` column, an exception will be thrown:

```sql
INSERT INTO test VALUES (42), (43), ('str_1');
SELECT d, dynamicType(d) FROM test;
```
```text
┌─d─────┬─dynamicType(d)─┐
│ 42 │ Int64 │
│ 43 │ Int64 │
│ str_1 │ String │
└───────┴────────────────┘
┌─d─────┬─dynamicType(d)─┐
│ ᴺᵁᴸᴸ │ None │
│ [1,2] │ Array(Int64) │
│ [3,4] │ Array(Int64) │
└───────┴────────────────┘
```

```sql
SELECT d, d + 1 AS res, toTypeName(res), dynamicType(d) FROM test;
```

```text
Received exception:
Code: 43. DB::Exception: Illegal types Array(Int64) and UInt8 of arguments of function plus: while executing 'FUNCTION plus(__table1.d : 3, 1_UInt8 :: 1) -> plus(__table1.d, 1_UInt8) Dynamic : 0'. (ILLEGAL_TYPE_OF_ARGUMENT)
```

We can filter out the unneeded types:

```sql
SELECT d, d + 1 AS res, toTypeName(res), dynamicType(res) FROM test WHERE dynamicType(d) NOT IN ('String', 'Array(Int64)', 'None')
```

```text
┌─d──┬─res─┬─toTypeName(res)─┬─dynamicType(res)─┐
│ 42 │ 43 │ Dynamic │ Int64 │
│ 43 │ 44 │ Dynamic │ Int64 │
└────┴─────┴─────────────────┴──────────────────┘
```

Or extract the required type as a subcolumn:

```sql
SELECT d, d.Int64 + 1 AS res, toTypeName(res) FROM test;
```

```text
┌─d─────┬──res─┬─toTypeName(res)─┐
│ 42 │ 43 │ Nullable(Int64) │
│ 43 │ 44 │ Nullable(Int64) │
│ str_1 │ ᴺᵁᴸᴸ │ Nullable(Int64) │
└───────┴──────┴─────────────────┘
┌─d─────┬──res─┬─toTypeName(res)─┐
│ ᴺᵁᴸᴸ │ ᴺᵁᴸᴸ │ Nullable(Int64) │
│ [1,2] │ ᴺᵁᴸᴸ │ Nullable(Int64) │
│ [3,4] │ ᴺᵁᴸᴸ │ Nullable(Int64) │
└───────┴──────┴─────────────────┘
```
## Using Dynamic type in ORDER BY and GROUP BY

During `ORDER BY` and `GROUP BY`, values of `Dynamic` types are compared similarly to values of the `Variant` type:
The result of operator `<` for values `d1` with underlying type `T1` and `d2` with underlying type `T2` of type `Dynamic` is defined as follows:
- If `T1 = T2 = T`, the result will be `d1.T < d2.T` (underlying values will be compared).
- If `T1 != T2`, the result will be `T1 < T2` (type names will be compared).

Examples:
```sql
CREATE TABLE test (d1 Dynamic, d2 Dynamic) ENGINE=Memory;
INSERT INTO test VALUES (42, 42), (42, 43), (42, 'abc'), (42, [1, 2, 3]), (42, []), (42, NULL);
CREATE TABLE test (d Dynamic) ENGINE=Memory;
INSERT INTO test VALUES (42), (43), ('abc'), ('abd'), ([1, 2, 3]), ([]), (NULL);
```

```sql
SELECT d2, dynamicType(d2) as d2_type from test order by d2;
SELECT d, dynamicType(d) FROM test;
```

```text
┌─d2──────┬─d2_type──────┐
│ [] │ Array(Int64) │
│ [1,2,3] │ Array(Int64) │
│ 42 │ Int64 │
│ 43 │ Int64 │
│ abc │ String │
│ ᴺᵁᴸᴸ │ None │
└─────────┴──────────────┘
┌─d───────┬─dynamicType(d)─┐
│ 42 │ Int64 │
│ 43 │ Int64 │
│ abc │ String │
│ abd │ String │
│ [1,2,3] │ Array(Int64) │
│ [] │ Array(Int64) │
│ ᴺᵁᴸᴸ │ None │
└─────────┴────────────────┘
```

```sql
SELECT d1, dynamicType(d1) as d1_type, d2, dynamicType(d2) as d2_type, d1 = d2, d1 < d2, d1 > d2 from test;
```

```text
┌─d1─┬─d1_type─┬─d2──────┬─d2_type──────┬─equals(d1, d2)─┬─less(d1, d2)─┬─greater(d1, d2)─┐
│ 42 │ Int64 │ 42 │ Int64 │ 1 │ 0 │ 0 │
│ 42 │ Int64 │ 43 │ Int64 │ 0 │ 1 │ 0 │
│ 42 │ Int64 │ abc │ String │ 0 │ 1 │ 0 │
│ 42 │ Int64 │ [1,2,3] │ Array(Int64) │ 0 │ 0 │ 1 │
│ 42 │ Int64 │ [] │ Array(Int64) │ 0 │ 0 │ 1 │
│ 42 │ Int64 │ ᴺᵁᴸᴸ │ None │ 0 │ 1 │ 0 │
└────┴─────────┴─────────┴──────────────┴────────────────┴──────────────┴─────────────────┘
```
If you need to find the row with a specific `Dynamic` value, you can do one of the following:

- Cast the value to the `Dynamic` type:

```sql
SELECT * FROM test WHERE d2 == [1,2,3]::Array(UInt32)::Dynamic;
```

```text
┌─d1─┬─d2──────┐
│ 42 │ [1,2,3] │
└────┴─────────┘
```

- Compare a `Dynamic` subcolumn with the required type:

```sql
SELECT * FROM test WHERE d2.`Array(Int64)` == [1,2,3] -- or using variantElement(d2, 'Array(UInt32)')
```

```text
┌─d1─┬─d2──────┐
│ 42 │ [1,2,3] │
└────┴─────────┘
```

Sometimes it can be useful to make an additional check on the dynamic type, as subcolumns with complex types like `Array`/`Map`/`Tuple` cannot be inside `Nullable` and will have default values instead of `NULL` on rows with different types:

```sql
SELECT d2, d2.`Array(Int64)`, dynamicType(d2) FROM test WHERE d2.`Array(Int64)` == [];
```

```text
┌─d2───┬─d2.Array(Int64)─┬─dynamicType(d2)─┐
│ 42 │ [] │ Int64 │
│ 43 │ [] │ Int64 │
│ abc │ [] │ String │
│ [] │ [] │ Array(Int64) │
│ ᴺᵁᴸᴸ │ [] │ None │
└──────┴─────────────────┴─────────────────┘
SELECT d, dynamicType(d) FROM test ORDER BY d;
```

```sql
SELECT d2, d2.`Array(Int64)`, dynamicType(d2) FROM test WHERE dynamicType(d2) == 'Array(Int64)' AND d2.`Array(Int64)` == [];
```

```text
┌─d2─┬─d2.Array(Int64)─┬─dynamicType(d2)─┐
│ [] │ [] │ Array(Int64) │
└────┴─────────────────┴─────────────────┘
┌─d───────┬─dynamicType(d)─┐
│ [] │ Array(Int64) │
│ [1,2,3] │ Array(Int64) │
│ 42 │ Int64 │
│ 43 │ Int64 │
│ abc │ String │
│ abd │ String │
│ ᴺᵁᴸᴸ │ None │
└─────────┴────────────────┘
```

**Note:** values of dynamic types with different numeric types are considered different values and are not compared to each other; their type names are compared instead.
@ -411,6 +569,21 @@ SELECT d, dynamicType(d) FROM test ORDER by d;
└─────┴────────────────┘
```

```sql
SELECT d, dynamicType(d) FROM test GROUP by d;
```

```text
┌─d───┬─dynamicType(d)─┐
│ 1 │ Int64 │
│ 100 │ UInt32 │
│ 1 │ UInt32 │
│ 100 │ Int64 │
└─────┴────────────────┘
```

**Note**: the described comparison rule is not applied during execution of comparison functions like `<`/`>`/`=` and others because of the [special handling](#using-dynamic-type-in-functions) of functions with the `Dynamic` type.

## Reaching the limit in number of different data types stored inside Dynamic

The `Dynamic` data type can store only a limited number of different data types as separate subcolumns. By default, this limit is 32, but you can change it in the type declaration using the syntax `Dynamic(max_types=N)`, where N is between 0 and 254 (due to implementation details, it's impossible to have more than 254 different data types stored as separate subcolumns inside `Dynamic`).
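For example, the following sketch (table and column names are illustrative) declares a column limited to two distinct inner types:

```sql
-- Only 2 different data types can be stored as separate subcolumns inside `d`;
-- further types fall back to the shared storage described below.
CREATE TABLE test (d Dynamic(max_types=2)) ENGINE = Memory;
```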
@ -226,9 +226,9 @@ Result:

## bitTestAll

Returns result of [logical conjuction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. Counting is right-to-left, starting at 0.
Returns result of [logical conjunction](https://en.wikipedia.org/wiki/Logical_conjunction) (AND operator) of all bits at given positions. Counting is right-to-left, starting at 0.

The conjuction for bit-wise operations:
The conjunction for bit-wise operations:

0 AND 0 = 0

@ -251,7 +251,7 @@ SELECT bitTestAll(number, index1, index2, index3, index4, ...)

**Returned value**

- Result of the logical conjuction. [UInt8](../data-types/int-uint.md).
- Result of the logical conjunction. [UInt8](../data-types/int-uint.md).

**Example**
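As a minimal illustration: `43` is `101011` in binary, so the bits at positions 0, 1, 3 and 5 are all set:

```sql
SELECT bitTestAll(43, 0, 1, 3, 5) AS result; -- returns 1: every tested bit is 1
```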
@ -1972,7 +1972,7 @@ Result:

## toISOYear

Converts a date, or date with time, to a UInt16 number containing the ISO Year number.
Converts a date, or date with time, to the ISO year as a UInt16 number.

**Syntax**

@ -1982,11 +1982,11 @@ toISOYear(value)

**Arguments**

- `value` — The value with date or date with time.
- `value` — The value with date or date with time. [Date](../data-types/date.md), [Date32](../data-types/date32.md), [DateTime](../data-types/datetime.md) or [DateTime64](../data-types/datetime64.md)

**Returned value**

- `value` converted to the current ISO year number. [UInt16](../data-types/int-uint.md).
- The input value converted to an ISO year number. [UInt16](../data-types/int-uint.md).

**Example**

@ -1995,7 +1995,7 @@ Query:
```sql
SELECT
  toISOYear(toDate('2024/10/02')) as year1,
  toISOYear(toDateTime('2024/10/02 01:30:00')) as year2
  toISOYear(toDateTime('2024-10-02 01:30:00')) as year2
```

Result:
@ -2010,6 +2010,38 @@ Result:

Converts a date, or date with time, to a UInt8 number containing the ISO Week number.

**Syntax**

```sql
toISOWeek(value)
```

**Arguments**

- `value` — The value with date or date with time.

**Returned value**

- The input value converted to an ISO week number. [UInt8](../data-types/int-uint.md).

**Example**

Query:

```sql
SELECT
  toISOWeek(toDate('2024/10/02')) AS week1,
  toISOWeek(toDateTime('2024/10/02 01:30:00')) AS week2
```

Result:

```response
┌─week1─┬─week2─┐
│ 40 │ 40 │
└───────┴───────┘
```

## toWeek

This function returns the week number for a date or datetime. The two-argument form of `toWeek()` enables you to specify whether the week starts on Sunday or Monday and whether the return value should be in the range from 0 to 53 or from 1 to 53. If the mode argument is omitted, the default mode is 0.
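A minimal sketch of the one- and two-argument forms:

```sql
SELECT
    toWeek(toDate('2024-10-02')) AS week_mode0,    -- default mode 0: weeks start on Sunday
    toWeek(toDate('2024-10-02'), 1) AS week_mode1; -- mode 1: weeks start on Monday
```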
@ -2901,7 +2933,42 @@ The same as ‘today() - 1’.

## timeSlot

Rounds the time to the half hour.
Rounds the time to the start of a half-an-hour-length interval.

**Syntax**

```sql
timeSlot(time[, time_zone])
```

**Arguments**

- `time` — Time to round to the start of a half-an-hour-length interval. [DateTime](../data-types/datetime.md)/[Date32](../data-types/date32.md)/[DateTime64](../data-types/datetime64.md).
- `time_zone` — A String type const value or an expression representing the time zone. [String](../data-types/string.md).

:::note
Though this function can take values of the extended types `Date32` and `DateTime64` as an argument, passing it a time outside the normal range (year 1970 to 2149 for `Date` / 2106 for `DateTime`) will produce wrong results.
:::

**Return type**

- Returns the time rounded to the start of a half-an-hour-length interval. [DateTime](../data-types/datetime.md).

**Example**

Query:

```sql
SELECT timeSlot(toDateTime('2000-01-02 03:04:05', 'UTC'));
```

Result:

```response
┌─timeSlot(toDateTime('2000-01-02 03:04:05', 'UTC'))─┐
│ 2000-01-02 03:00:00 │
└────────────────────────────────────────────────────┘
```

## toYYYYMM
@ -288,11 +288,11 @@ toIPv4OrDefault(value)

**Arguments**

- `value` — The value with IPv4 address.
- `value` — A string-encoded IPv4 address. [String](../data-types/string.md)

**Returned value**

- `value` converted to the current IPv4 address. [String](../data-types/string.md).
- `value` converted to an IPv4 address. [IPv4](../data-types/ipv4.md).

**Example**

@ -316,6 +316,38 @@ Result:

Same as `toIPv4`, but if the IPv4 address has an invalid format, it returns null.

**Syntax**

```sql
toIPv4OrNull(value)
```

**Arguments**

- `value` — A string-encoded IPv4 address. [String](../data-types/string.md)

**Returned value**

- `value` converted to an IPv4 address. [IPv4](../data-types/ipv4.md).

**Example**

Query:

```sql
SELECT
  toIPv4OrNull('192.168.0.1') AS s1,
  toIPv4OrNull('192.168.0') AS s2
```

Result:

```response
┌─s1──────────┬─s2───┐
│ 192.168.0.1 │ ᴺᵁᴸᴸ │
└─────────────┴──────┘
```

## toIPv6OrDefault(string)

Same as `toIPv6`, but if the IPv6 address has an invalid format, it returns `::` (0 IPv6).
@ -202,12 +202,36 @@ Result:

Returns the type name of the passed argument.

If `NULL` is passed, then the function returns type `Nullable(Nothing)`, which corresponds to ClickHouse's internal `NULL` representation.
If `NULL` is passed, the function returns type `Nullable(Nothing)`, which corresponds to ClickHouse's internal `NULL` representation.

**Syntax**

```sql
toTypeName(x)
toTypeName(value)
```

**Arguments**

- `value` — A value of arbitrary type.

**Returned value**

- The data type name of the input value. [String](../data-types/string.md).

**Example**

Query:

```sql
SELECT toTypeName(123);
```

Result:

```response
┌─toTypeName(123)─┐
│ UInt8 │
└─────────────────┘
```

## blockSize {#blockSize}
@ -386,13 +410,37 @@ Code: 44. DB::Exception: Received from localhost:9000. DB::Exception: Illegal ty

## ignore

Accepts any arguments, including `NULL` and does nothing. Always returns 0.
The argument is internally still evaluated. Useful e.g. for benchmarks.
Accepts arbitrary arguments and unconditionally returns `0`.
The argument is still evaluated internally, making it useful for e.g. benchmarking.

**Syntax**

```sql
ignore(x)
ignore([arg1[, arg2[, ...]]])
```

**Arguments**

- Accepts arbitrarily many arguments of arbitrary type, including `NULL`.

**Returned value**

- Returns `0`.

**Example**

Query:

```sql
SELECT ignore(0, 'ClickHouse', NULL);
```

Result:

```response
┌─ignore(0, 'ClickHouse', NULL)─┐
│ 0 │
└───────────────────────────────┘
```

## sleep
@ -500,13 +548,9 @@ Useful in table engine parameters of `CREATE TABLE` queries where you need to sp
currentDatabase()
```

**Arguments**

None.

**Returned value**

- `value` returns the current database name. [String](../data-types/string.md).
- Returns the current database name. [String](../data-types/string.md).

**Example**

@ -555,6 +599,42 @@ Result:
└───────────────┘
```
## currentSchemas

Returns a single-element array with the name of the current database schema.

**Syntax**

```sql
currentSchemas(bool)
```

Alias: `current_schemas`.

**Arguments**

- `bool`: A boolean value. [Bool](../data-types/boolean.md).

:::note
The boolean argument is ignored. It only exists for the sake of compatibility with the [implementation](https://www.postgresql.org/docs/7.3/functions-misc.html) of this function in PostgreSQL.
:::

**Returned values**

- Returns a single-element array with the name of the current database.

**Example**

```sql
SELECT currentSchemas(true);
```

Result:

```response
['default']
```

## isConstant

Returns whether the argument is a constant expression.
@ -1797,7 +1877,7 @@ toColumnTypeName(value)

**Example**

Difference between `toTypeName ' and ' toColumnTypeName`:
Difference between `toTypeName` and `toColumnTypeName`:

```sql
SELECT toTypeName(CAST('2018-01-01 01:02:03' AS DateTime))
@ -3873,13 +3953,15 @@ Retrieves the connection ID of the client that submitted the current query and r
connectionId()
```

Alias: `connection_id`.

**Parameters**

None.

**Returned value**

Returns an integer of type UInt64.
The current connection ID. [UInt64](../data-types/int-uint.md).

**Implementation details**

@ -3897,40 +3979,6 @@ SELECT connectionId();
0
```

## connection_id

An alias of `connectionId`. Retrieves the connection ID of the client that submitted the current query and returns it as a UInt64 integer.

**Syntax**

```sql
connection_id()
```

**Parameters**

None.

**Returned value**

Returns an integer of type UInt64.

**Implementation details**

This function is most useful in debugging scenarios or for internal purposes within the MySQL handler. It was created for compatibility with [MySQL's `CONNECTION_ID` function](https://dev.mysql.com/doc/refman/8.0/en/information-functions.html#function_connection-id). It is not typically used in production queries.

**Example**

Query:

```sql
SELECT connection_id();
```

```response
0
```

## getClientHTTPHeader

Get the value of an HTTP header.
@ -755,7 +755,7 @@ Result:

## match {#match}

Returns whether string `haystack` matches the regular expression `pattern` in [re2 regular syntax](https://github.com/google/re2/wiki/Syntax).
Returns whether string `haystack` matches the regular expression `pattern` in [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax).

Matching is based on UTF-8, e.g. `.` matches the Unicode code point `¥` which is represented in UTF-8 using two bytes. The regular
expression must not contain null bytes. If the haystack or the pattern are not valid UTF-8, then the behavior is undefined.
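A minimal illustration:

```sql
SELECT match('ClickHouse', 'Click.+') AS is_match; -- returns 1
```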
@ -852,9 +852,10 @@ multiFuzzyMatchAllIndices(haystack, distance, \[pattern<sub>1</sub>, pattern<sub

## extract

Extracts a fragment of a string using a regular expression. If `haystack` does not match the `pattern` regex, an empty string is returned.
Returns the first match of a regular expression in a string.
If `haystack` does not match the `pattern` regex, an empty string is returned.

For regex without subpatterns, the function uses the fragment that matches the entire regex. Otherwise, it uses the fragment that matches the first subpattern.
If the regular expression has capturing groups, the function matches the input string against the first capturing group.

**Syntax**

@ -862,13 +863,36 @@ For regex without subpatterns, the function uses the fragment that matches the e
extract(haystack, pattern)
```

**Arguments**

- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax).

**Returned value**

- The first match of the regular expression in the haystack string. [String](../data-types/string.md).

**Example**

Query:

```sql
SELECT extract('number: 1, number: 2, number: 3', '\\d+') AS result;
```

Result:

```response
┌─result─┐
│ 1 │
└────────┘
```
## extractAll

Extracts all fragments of a string using a regular expression. If `haystack` does not match the `pattern` regex, an empty string is returned.
Returns an array of all matches of a regular expression in a string. If `haystack` does not match the `pattern` regex, an empty array is returned.

Returns an array of strings consisting of all matches of the regex.

The behavior with respect to subpatterns is the same as in function `extract`.
The behavior with respect to sub-patterns is the same as in function [`extract`](#extract).

**Syntax**

@ -876,6 +900,31 @@ The behavior with respect to subpatterns is the same as in function `extract`.
extractAll(haystack, pattern)
```

**Arguments**

- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax).

**Returned value**

- Array of matches of the regular expression in the haystack string. [Array](../data-types/array.md)([String](../data-types/string.md)).

**Example**

Query:

```sql
SELECT extractAll('number: 1, number: 2, number: 3', '\\d+') AS result;
```

Result:

```response
┌─result────────┐
│ ['1','2','3'] │
└───────────────┘
```
## extractAllGroupsHorizontal

Matches all groups of the `haystack` string using the `pattern` regular expression. Returns an array of arrays, where the first array includes all fragments matching the first group, the second array - matching the second group, etc.

@ -891,7 +940,7 @@ extractAllGroupsHorizontal(haystack, pattern)

**Arguments**

- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. [String](../data-types/string.md).

**Returned value**

@ -915,6 +964,39 @@ Result:
└──────────────────────────────────────────────────────────────────────────────────────────┘
```

## extractGroups

Matches all groups of a given input string with a given regular expression and returns an array of arrays of matches.

**Syntax**

``` sql
extractGroups(haystack, pattern)
```

**Arguments**

- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. [String](../data-types/string.md).

**Returned value**

- Array of arrays of matches. [Array](../data-types/array.md).

**Example**

``` sql
SELECT extractGroups('hello abc=111 world', '("[^"]+"|\\w+)=("[^"]+"|\\w+)') AS result;
```

Result:

``` text
┌─result────────┐
│ ['abc','111'] │
└───────────────┘
```

## extractAllGroupsVertical

Matches all groups of the `haystack` string using the `pattern` regular expression. Returns an array of arrays, where each array includes matching fragments from every group. Fragments are grouped in order of appearance in the `haystack`.

@ -928,7 +1010,7 @@ extractAllGroupsVertical(haystack, pattern)

**Arguments**

- `haystack` — Input string. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. [String](../data-types/string.md).
- `pattern` — Regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). Must contain groups, each group enclosed in parentheses. If `pattern` contains no groups, an exception is thrown. [String](../data-types/string.md).

**Returned value**

@ -1484,7 +1566,7 @@ countMatches(haystack, pattern)

**Arguments**

- `haystack` — The string to search in. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `pattern` — The regular expression with [re2 syntax](https://github.com/google/re2/wiki/Syntax). [String](../data-types/string.md).
- `pattern` — The regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). [String](../data-types/string.md).

**Returned value**

@ -1529,7 +1611,7 @@ countMatchesCaseInsensitive(haystack, pattern)

**Arguments**

- `haystack` — The string to search in. [String](../../sql-reference/syntax.md#syntax-string-literal).
- `pattern` — The regular expression with [re2 syntax](https://github.com/google/re2/wiki/Syntax). [String](../data-types/string.md).
- `pattern` — The regular expression with [re2 regular expression syntax](https://github.com/google/re2/wiki/Syntax). [String](../data-types/string.md).

**Returned value**
@ -5230,15 +5230,52 @@ Result:

Also see the `toUnixTimestamp` function.

## toFixedString(s, N)
## toFixedString

Converts a [String](../data-types/string.md) type argument to a [FixedString(N)](../data-types/fixedstring.md) type (a string of fixed length N).
If the string has fewer bytes than N, it is padded with null bytes to the right. If the string has more bytes than N, an exception is thrown.

## toStringCutToZero(s)
**Syntax**

```sql
toFixedString(s, N)
```

**Arguments**

- `s` — A String to convert to a fixed string. [String](../data-types/string.md).
- `N` — The target length N. [UInt8](../data-types/int-uint.md)

**Returned value**

- An N-length fixed string of `s`. [FixedString](../data-types/fixedstring.md).

**Example**

Query:

``` sql
SELECT toFixedString('foo', 8) AS s;
```

Result:

```response
┌─s─────────────┐
│ foo\0\0\0\0\0 │
└───────────────┘
```

## toStringCutToZero

Accepts a String or FixedString argument. Returns the String with the content truncated at the first zero byte found.

**Syntax**

```sql
toStringCutToZero(s)
```

**Example**

Query:
@ -272,8 +272,7 @@ ALTER TABLE table_name MODIFY COLUMN column_name RESET SETTING max_compress_bloc

## MATERIALIZE COLUMN

Materializes a column with a `DEFAULT` or `MATERIALIZED` value expression.
This statement can be used to rewrite existing column data after a `DEFAULT` or `MATERIALIZED` expression has been added or updated (which only updates the metadata but does not change existing data).
Materializes a column with a `DEFAULT` or `MATERIALIZED` value expression. When adding a materialized column using `ALTER TABLE table_name ADD COLUMN column_name MATERIALIZED`, existing rows without materialized values are not automatically filled. The `MATERIALIZE COLUMN` statement can be used to rewrite existing column data after a `DEFAULT` or `MATERIALIZED` expression has been added or updated (which only updates the metadata but does not change existing data).
Implemented as a [mutation](/docs/en/sql-reference/statements/alter/index.md#mutations).

For columns with a new or updated `MATERIALIZED` value expression, all existing rows are rewritten.
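A minimal sketch (table and column names are illustrative; assumes a column `value` already exists):

```sql
-- Adding the column only updates metadata; old rows have no stored value yet.
ALTER TABLE tab ADD COLUMN doubled UInt64 MATERIALIZED value * 2;

-- Backfill existing rows; executed as a mutation.
ALTER TABLE tab MATERIALIZE COLUMN doubled;
```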
@ -41,7 +41,7 @@ ORDER BY ts, event_type;
│ 2020-01-02 00:00:00 │ imp │ 2 │
└─────────────────────┴────────────┴─────────────────┘

-- Let's add the new measurment `cost`
-- Let's add the new measurement `cost`
-- and the new dimension `browser`.

ALTER TABLE events
@ -46,7 +46,7 @@ The `CHECK TABLE` query supports the following table engines:
- [StripeLog](../../engines/table-engines/log-family/stripelog.md)
- [MergeTree family](../../engines/table-engines/mergetree-family/mergetree.md)

Performed over the tables with another table engines causes an `NOT_IMPLEMETED` exception.
Performing it over tables with other table engines causes a `NOT_IMPLEMENTED` exception.

Engines from the `*Log` family do not provide automatic data recovery on failure. Use the `CHECK TABLE` query to track data loss in a timely manner.
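For example (table name illustrative):

```sql
CHECK TABLE test_table;
-- With the default settings this returns a single row whose `result` column
-- is 1 if the data is intact and 0 if corruption was detected.
```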
@ -442,7 +442,7 @@ DEFLATE_QPL is not available in ClickHouse Cloud.

### Specialized Codecs

These codecs are designed to make compression more effective by exploiting specific features of the data. Some of these codecs do not compress data themself, they instead preprocess the data such that a second compression stage using a general-purpose codec can achieve a higher data compression rate.
These codecs are designed to make compression more effective by exploiting specific features of the data. Some of these codecs do not compress data themselves, they instead preprocess the data such that a second compression stage using a general-purpose codec can achieve a higher data compression rate.

#### Delta
@ -135,15 +135,15 @@ To change SQL security for an existing view, use
ALTER TABLE MODIFY SQL SECURITY { DEFINER | INVOKER | NONE } [DEFINER = { user | CURRENT_USER }]
```

### Examples sql security
### Examples
```sql
CREATE test_view
CREATE VIEW test_view
DEFINER = alice SQL SECURITY DEFINER
AS SELECT ...
```

```sql
CREATE test_view
CREATE VIEW test_view
SQL SECURITY INVOKER
AS SELECT ...
```
@ -184,14 +184,6 @@ Differences from regular non-refreshable materialized views:
The settings in the `REFRESH ... SETTINGS` part of the query are refresh settings (e.g. `refresh_retries`), distinct from regular settings (e.g. `max_threads`). Regular settings can be specified using `SETTINGS` at the end of the query.
:::

:::note
Refreshable materialized views are a work in progress. Setting `allow_experimental_refreshable_materialized_view = 1` is required for creating one. Current limitations:
* not compatible with Replicated database or table engines
* It is not supported in ClickHouse Cloud
* require [Atomic database engine](../../../engines/database-engines/atomic.md),
* no limit on number of concurrent refreshes.
:::

### Refresh Schedule

Example refresh schedules:
@ -202,7 +194,11 @@ REFRESH EVERY 1 MONTH OFFSET 5 DAY 2 HOUR -- on 6th day of every month, at 2:00
REFRESH EVERY 2 WEEK OFFSET 5 DAY 15 HOUR 10 MINUTE -- every other Saturday, at 3:10 pm
REFRESH EVERY 30 MINUTE -- at 00:00, 00:30, 01:00, 01:30, etc
REFRESH AFTER 30 MINUTE -- 30 minutes after the previous refresh completes, no alignment with time of day
-- REFRESH AFTER 1 HOUR OFFSET 1 MINUTE -- syntax errror, OFFSET is not allowed with AFTER
-- REFRESH AFTER 1 HOUR OFFSET 1 MINUTE -- syntax error, OFFSET is not allowed with AFTER
REFRESH EVERY 1 WEEK 2 DAYS -- every 9 days, not on any particular day of the week or month;
-- specifically, when day number (since 1969-12-29) is divisible by 9
REFRESH EVERY 5 MONTHS -- every 5 months, different months each year (as 12 is not divisible by 5);
-- specifically, when month number (since 1970-01) is divisible by 5
```

`RANDOMIZE FOR` randomly adjusts the time of each refresh, e.g.:
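One possible schedule of this kind (illustrative):

```sql
REFRESH EVERY 1 DAY RANDOMIZE FOR 1 HOUR -- each refresh runs at a random time between 00:00 and 01:00
```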
@ -214,6 +210,16 @@ At most one refresh may be running at a time, for a given view. E.g. if a view w

Additionally, a refresh is started immediately after the materialized view is created, unless `EMPTY` is specified in the `CREATE` query. If `EMPTY` is specified, the first refresh happens according to schedule.

### In Replicated DB

If the refreshable materialized view is in a [Replicated database](../../../engines/database-engines/replicated.md), the replicas coordinate with each other such that only one replica performs the refresh at each scheduled time. The [ReplicatedMergeTree](../../../engines/table-engines/mergetree-family/replication.md) table engine is required, so that all replicas see the data produced by the refresh.

In `APPEND` mode, coordination can be disabled using `SETTINGS all_replicas = 1`. This makes replicas do refreshes independently of each other. In this case ReplicatedMergeTree is not required.

In non-`APPEND` mode, only coordinated refreshing is supported. For uncoordinated refreshing, use an `Atomic` database and a `CREATE ... ON CLUSTER` query to create refreshable materialized views on all replicas.

The coordination is done through Keeper. The znode path is determined by the [default_replica_path](../../../operations/server-configuration-parameters/settings.md#default_replica_path) server setting.
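A minimal sketch of such a view (all names are illustrative; assumes the database `db` is Replicated):

```sql
CREATE MATERIALIZED VIEW db.daily_totals
REFRESH EVERY 1 DAY
ENGINE = ReplicatedMergeTree ORDER BY day  -- replicated target so all replicas see refresh results
AS SELECT toDate(ts) AS day, sum(amount) AS total
FROM db.events
GROUP BY day;
```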
### Dependencies {#refresh-dependencies}

`DEPENDS ON` synchronizes refreshes of different tables. By way of example, suppose there's a chain of two refreshable materialized views:

@ -277,6 +283,8 @@ The status of all refreshable materialized views is available in table [`system.

To manually stop, start, trigger, or cancel refreshes use [`SYSTEM STOP|START|REFRESH|CANCEL VIEW`](../system.md#refreshable-materialized-views).

To wait for a refresh to complete, use [`SYSTEM WAIT VIEW`](../system.md#refreshable-materialized-views). This is particularly useful for waiting for the initial refresh after creating a view.

:::note
Fun fact: the refresh query is allowed to read from the view that's being refreshed, seeing the pre-refresh version of the data. This means you can implement Conway's game of life: https://pastila.nl/?00021a4b/d6156ff819c83d490ad2dcec05676865#O0LGWTO7maUQIA4AcGUtlA==
:::
@ -233,15 +233,20 @@ Hierarchy of privileges:
- `addressToSymbol`
- `demangle`
- [SOURCES](#sources)
- `AZURE`
- `FILE`
- `URL`
- `REMOTE`
- `MYSQL`
- `ODBC`
- `JDBC`
- `HDFS`
- `S3`
- `HIVE`
- `JDBC`
- `MONGO`
- `MYSQL`
- `ODBC`
- `POSTGRES`
- `REDIS`
- `REMOTE`
- `S3`
- `SQLITE`
- `URL`
- [dictGet](#dictget)
- [displaySecretsInShowAndSelect](#displaysecretsinshowandselect)
- [NAMED COLLECTION ADMIN](#named-collection-admin)
@ -510,15 +515,20 @@ Allows using [introspection](../../operations/optimizing-performance/sampling-qu
Allows using external data sources. Applies to [table engines](../../engines/table-engines/index.md) and [table functions](../../sql-reference/table-functions/index.md#table-functions).

- `SOURCES`. Level: `GROUP`
- `AZURE`. Level: `GLOBAL`
- `FILE`. Level: `GLOBAL`
- `URL`. Level: `GLOBAL`
- `REMOTE`. Level: `GLOBAL`
- `MYSQL`. Level: `GLOBAL`
- `ODBC`. Level: `GLOBAL`
- `JDBC`. Level: `GLOBAL`
- `HDFS`. Level: `GLOBAL`
- `S3`. Level: `GLOBAL`
- `HIVE`. Level: `GLOBAL`
- `JDBC`. Level: `GLOBAL`
- `MONGO`. Level: `GLOBAL`
- `MYSQL`. Level: `GLOBAL`
- `ODBC`. Level: `GLOBAL`
- `POSTGRES`. Level: `GLOBAL`
- `REDIS`. Level: `GLOBAL`
- `REMOTE`. Level: `GLOBAL`
- `S3`. Level: `GLOBAL`
- `SQLITE`. Level: `GLOBAL`
- `URL`. Level: `GLOBAL`

The `SOURCES` privilege enables use of all the sources. You can also grant a privilege for each source individually. To use sources, you need additional privileges.
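For example (user name illustrative):

```sql
GRANT SOURCES ON *.* TO john;  -- all sources at once
GRANT S3, URL ON *.* TO john;  -- or individual sources
```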
||||
|
@ -29,7 +29,7 @@ The condition could be any expression based on your requirements.
Here is a simple example that intersects the numbers 1 to 10 with the numbers 3 to 8:

```sql
SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,6);
SELECT number FROM numbers(1,10) INTERSECT SELECT number FROM numbers(3,8);
```

Result:
@ -351,11 +351,15 @@ Shows privileges for a user.
**Syntax**

``` sql
SHOW GRANTS [FOR user1 [, user2 ...]]
SHOW GRANTS [FOR user1 [, user2 ...]] [WITH IMPLICIT] [FINAL]
```

If user is not specified, the query returns privileges for the current user.

The `WITH IMPLICIT` modifier allows showing the implicit grants (e.g., `GRANT SELECT ON system.one`).

The `FINAL` modifier merges all grants from the user and its granted roles (with inheritance).
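For example (user name illustrative):

```sql
SHOW GRANTS FOR john WITH IMPLICIT FINAL;
```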
## SHOW CREATE USER

Shows parameters that were used at a [user creation](../../sql-reference/statements/create/user.md).

@ -565,3 +565,13 @@ If there's a refresh in progress for the given view, interrupt and cancel it. Ot

```sql
SYSTEM CANCEL VIEW [db.]name
```

### SYSTEM WAIT VIEW

Waits for the running refresh to complete. If no refresh is running, returns immediately. If the latest refresh attempt failed, reports an error.

Can be used right after creating a new refreshable materialized view (without the `EMPTY` keyword) to wait for the initial refresh to complete.

```sql
SYSTEM WAIT VIEW [db.]name
```
@ -6,7 +6,7 @@ sidebar_label: iceberg

# iceberg Table Function

Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure or locally stored.
Provides a read-only table-like interface to Apache [Iceberg](https://iceberg.apache.org/) tables in Amazon S3, Azure, HDFS or locally stored.

## Syntax

@ -17,13 +17,16 @@ icebergS3(named_collection[, option=value [,..]])
icebergAzure(connection_string|storage_account_url, container_name, blobpath, [,account_name], [,account_key] [,format] [,compression_method])
icebergAzure(named_collection[, option=value [,..]])

icebergHDFS(path_to_table, [,format] [,compression_method])
icebergHDFS(named_collection[, option=value [,..]])

icebergLocal(path_to_table, [,format] [,compression_method])
icebergLocal(named_collection[, option=value [,..]])
```

## Arguments

Description of the arguments coincides with description of arguments in table functions `s3`, `azureBlobStorage` and `file` correspondingly.
The description of the arguments coincides with that of the arguments in the table functions `s3`, `azureBlobStorage`, `HDFS`, and `file`, respectively.
`format` stands for the format of data files in the Iceberg table.
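
For example, a usage sketch for the HDFS variant (the namenode host and table path are illustrative):

```sql
SELECT * FROM icebergHDFS('hdfs://hdfs1:9000/warehouse/db/table_name');
```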

**Returned value**
@ -36,7 +39,7 @@ SELECT * FROM icebergS3('http://test.s3.amazonaws.com/clickhouse-bucket/test_tab
```

:::important
ClickHouse currently supports reading v1 and v2 of the Iceberg format via the `icebergS3`, `icebergAzure` and `icebergLocal` table functions and `IcebergS3`, `icebergAzure` ans `icebergLocal` table engines.
ClickHouse currently supports reading v1 and v2 of the Iceberg format via the `icebergS3`, `icebergAzure`, `icebergHDFS` and `icebergLocal` table functions and `IcebergS3`, `icebergAzure`, `IcebergHDFS` and `IcebergLocal` table engines.
:::

## Defining a named collection

@ -1,2 +0,0 @@
# Just an empty yaml file. Keep it alone.
{}
13 docs/ru/interfaces/third-party/gui.md vendored
@ -9,6 +9,19 @@ sidebar_label: "Визуальные интерфейсы от сторонни

## Open source {#s-otkrytym-iskhodnym-kodom}

### ChartDB {#chartdb}

[ChartDB](https://chartdb.io) is a free and open-source tool for visualizing and designing database schemas, including ClickHouse, with a single query. Built on React, it provides a convenient and simple interface and requires no credentials or registration.

Key features:

- Schema visualization: instantly import and visualize your ClickHouse schema, including ER diagrams with materialized views and standard views that show table references;
- AI-powered DDL export: easily generate DDL scripts for better schema management and documentation;
- Support for various SQL dialects: compatible with a wide range of SQL dialects, making it versatile for different database environments;
- No registration or credentials: all functionality is available directly in the browser, providing a seamless and secure experience.

[ChartDB source code](https://github.com/chartdb/chartdb).

### Tabix {#tabix}

A web interface for ClickHouse in the [Tabix](https://github.com/tabixio/tabix) project.

@ -33,7 +33,7 @@ sidebar_label: "Отличительные возможности ClickHouse"

## SQL support {#sql-support}

ClickHouse supports a [declarative query language based on SQL](../sql-reference/index.md) that in [many cases](../sql-reference/ansi.mdx) matches the SQL standard.
ClickHouse supports the declarative SQL query language.

Supported are [GROUP BY](../sql-reference/statements/select/group-by.md), [ORDER BY](../sql-reference/statements/select/order-by.md), subqueries in the [FROM](../sql-reference/statements/select/from.md), [IN](../sql-reference/operators/in.md), and [JOIN](../sql-reference/statements/select/join.md) clauses, [window functions](../sql-reference/window-functions/index.mdx), and scalar subqueries.
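
As a quick illustration of a few of these together (all data is generated inline by `numbers`):

```sql
SELECT
    number % 3 AS g,
    count() AS c,
    (SELECT max(number) FROM numbers(10)) AS mx -- scalar subquery
FROM numbers(10)
GROUP BY g
ORDER BY g;
```
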
@ -6,7 +6,7 @@ sidebar_label: "Настройки пользователей"

# User settings {#nastroiki-polzovatelei}

The `users` section of the `user.xml` configuration file contains user settings.
The `users` section of the `users.xml` configuration file contains user settings.

:::note Info
For user management, it is recommended to use the [SQL-driven workflow](../access-rights.md#access-control), which is also supported in ClickHouse.
@ -30,7 +30,7 @@ sidebar_label: "Настройки пользователей"
    <profile>profile_name</profile>

    <quota>default</quota>
    <default_database>default<default_database>
    <default_database>default</default_database>
    <databases>
        <database_name>
            <table_name>
@ -93,7 +93,7 @@ WITH anySimpleState(number) AS c SELECT toTypeName(c), c FROM numbers(1);
## -Distinct {#agg-functions-combinator-distinct}

When the Distinct combinator is present, each unique value of the arguments is taken into account by the aggregate function only once.
Examples: `sum(DISTINCT x)`, `groupArray(DISTINCT x)`, `corrStableDistinct(DISTINCT x, y)`, and so on.
Examples: `sum(DISTINCT x)` (or `sumDistinct(x)`), `groupArray(DISTINCT x)` (or `groupArrayDistinct(x)`), `corrStable(DISTINCT x, y)` (or `corrStableDistinct(x, y)`), and so on.

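For instance, a quick, hand-checkable sketch:

```sql
-- The distinct values of number % 3 are 0, 1 and 2, so the result is 3
SELECT sum(DISTINCT number % 3) FROM numbers(10);
```
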
## -OrDefault {#agg-functions-combinator-ordefault}
@ -1,10 +0,0 @@
---
slug: /ru/sql-reference/ansi
sidebar_position: 40
sidebar_label: ANSI Compatibility
title: "ANSI Compatibility"
---

import Content from '@site/docs/en/sql-reference/ansi.md';

<Content />
@ -464,7 +464,7 @@ GRANT INSERT(x,y) ON db.table TO john
- `FILE`. Level: `GLOBAL`
- `URL`. Level: `GLOBAL`
- `REMOTE`. Level: `GLOBAL`
- `YSQL`. Level: `GLOBAL`
- `MYSQL`. Level: `GLOBAL`
- `ODBC`. Level: `GLOBAL`
- `JDBC`. Level: `GLOBAL`
- `HDFS`. Level: `GLOBAL`